Merged
33 commits
dd23207
idpf: convert control queue mutex to a spinlock
PlaidCat Sep 16, 2025
784526d
udmabuf: fix a buf size overflow issue during udmabuf creation
PlaidCat Sep 16, 2025
d7b9b66
crypto: engine - Remove prepare/unprepare request
PlaidCat Sep 16, 2025
1a168d8
crypto: tegra - do not transfer req when tegra init fails
PlaidCat Sep 16, 2025
7a72e84
crypto: tegra - remove redundant error check on ret
PlaidCat Sep 16, 2025
746db31
crypto: tegra - remove unneeded crypto_engine_stop() call
PlaidCat Sep 16, 2025
38ed63d
crypto: tegra - Use separate buffer for setkey
PlaidCat Sep 16, 2025
e4e992a
crypto: tegra - Do not use fixed size buffers
PlaidCat Sep 16, 2025
02f7be5
crypto: tegra - finalize crypto req on error
PlaidCat Sep 16, 2025
048f20b
crypto: tegra - check return value for hash do_one_req
PlaidCat Sep 16, 2025
1a36acd
crypto: tegra - Transfer HASH init function to crypto engine
PlaidCat Sep 16, 2025
1f66a0f
crypto: tegra - Fix HASH intermediate result handling
PlaidCat Sep 16, 2025
5211e2c
crypto: tegra - Fix CMAC intermediate result handling
PlaidCat Sep 16, 2025
c7bbc2b
crypto: tegra - Set IV to NULL explicitly for AES ECB
PlaidCat Sep 16, 2025
03b0d53
crypto: tegra - Reserve keyslots to allocate dynamically
PlaidCat Sep 16, 2025
3f68552
crypto: tegra - Use HMAC fallback when keyslots are full
PlaidCat Sep 16, 2025
9adef65
crypto: tegra - Fix format specifier in tegra_sha_prep_cmd()
PlaidCat Sep 16, 2025
56cf9f2
crypto: tegra - Fix IV usage for AES ECB
PlaidCat Sep 16, 2025
ab611f6
smb: convert to ctime accessor functions
PlaidCat Sep 16, 2025
e1c8c30
smb: client: fix race with concurrent opens in unlink(2)
PlaidCat Sep 16, 2025
571678e
smb: client: fix race with concurrent opens in rename(2)
PlaidCat Sep 16, 2025
71e4dca
ibmveth: Always stop tx queues during close
PlaidCat Sep 16, 2025
8119e3e
net: ibmveth: make veth_pool_store stop hanging
PlaidCat Sep 16, 2025
7492410
powerpc/pseries/iommu: Fix kmemleak in TCE table userspace view
PlaidCat Sep 16, 2025
4d2fb3e
Rebuild rocky9_6 with kernel-5.14.0-570.41.1.el9_6
PlaidCat Sep 16, 2025
13494e0
scsi: lpfc: Use memcpy() for BIOS version
PlaidCat Sep 16, 2025
ca495c1
drm/vkms: Fix use after free and double free on init error
PlaidCat Sep 16, 2025
61a583a
drm/gem: Acquire references on GEM handles for framebuffers
PlaidCat Sep 16, 2025
304da2d
drm/framebuffer: Acquire internal references on GEM handles
PlaidCat Sep 16, 2025
08cd329
powerpc/pseries/iommu: memory notifier incorrectly adds TCEs for pmemory
PlaidCat Sep 16, 2025
41f8526
powerpc/pseries/iommu: create DDW for devices with DMA mask less than…
PlaidCat Sep 16, 2025
dd29e40
posix-cpu-timers: fix race between handle_posix_cpu_timers() and posi…
PlaidCat Sep 16, 2025
4eb20e2
Rebuild rocky9_6 with kernel-5.14.0-570.42.2.el9_6
PlaidCat Sep 16, 2025
File renamed without changes.
2 changes: 1 addition & 1 deletion Makefile.rhelver
@@ -12,7 +12,7 @@ RHEL_MINOR = 6
#
# Use this spot to avoid future merge conflicts.
# Do not trim this comment.
RHEL_RELEASE = 570.39.1
RHEL_RELEASE = 570.42.2

#
# ZSTREAM
1 change: 1 addition & 0 deletions arch/powerpc/include/asm/mmzone.h
@@ -35,6 +35,7 @@ extern cpumask_var_t node_to_cpumask_map[];
#ifdef CONFIG_MEMORY_HOTPLUG
extern unsigned long max_pfn;
u64 memory_hotplug_max(void);
u64 hot_add_drconf_memory_max(void);
#else
#define memory_hotplug_max() memblock_end_of_DRAM()
#endif
2 changes: 1 addition & 1 deletion arch/powerpc/mm/numa.c
@@ -1354,7 +1354,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
return nid;
}

static u64 hot_add_drconf_memory_max(void)
u64 hot_add_drconf_memory_max(void)
{
struct device_node *memory = NULL;
struct device_node *dn = NULL;
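Taken together, the mmzone.h and numa.c hunks are the usual two-step for exporting a file-local helper: drop "static" from the definition and publish a prototype in a shared header so another translation unit (here the pseries IOMMU code) can call it. A minimal sketch of the pattern, collapsed into one file and using hypothetical names rather than the kernel symbols:

/* util.h (hypothetical): the new prototype other .c files can include */
unsigned long long helper_max(void);

/* util.c (hypothetical): the definition used to read
 *     static unsigned long long helper_max(void) { ... }
 * Dropping "static" gives the symbol external linkage, so code in another
 * translation unit can now link against it. */
unsigned long long helper_max(void)
{
    return 1ULL << 39;          /* placeholder bound: 512 GiB */
}

/* caller.c (hypothetical), collapsed into this one file so the sketch compiles */
#include <stdio.h>

int main(void)
{
    printf("max = %#llx\n", helper_max());
    return 0;
}
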
141 changes: 111 additions & 30 deletions arch/powerpc/platforms/pseries/iommu.c
@@ -52,7 +52,8 @@ enum {
enum {
DDW_EXT_SIZE = 0,
DDW_EXT_RESET_DMA_WIN = 1,
DDW_EXT_QUERY_OUT_SIZE = 2
DDW_EXT_QUERY_OUT_SIZE = 2,
DDW_EXT_LIMITED_ADDR_MODE = 3
};

static struct iommu_table *iommu_pseries_alloc_table(int node)
@@ -196,7 +197,7 @@ static void tce_iommu_userspace_view_free(struct iommu_table *tbl)

static void tce_free_pSeries(struct iommu_table *tbl)
{
if (!tbl->it_userspace)
if (tbl->it_userspace)
tce_iommu_userspace_view_free(tbl);
}
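The one-character change above is the whole kmemleak fix: the userspace view must be freed when it_userspace is populated, whereas the old test only ran the teardown when the pointer was already NULL, so every populated table leaked. A stand-alone sketch of the same inverted-guard bug and its fix, with hypothetical names rather than the real iommu_table API:

#include <stdio.h>
#include <stdlib.h>

struct table {
    unsigned long *userspace_view;      /* optional bookkeeping, may be NULL */
};

/* Buggy variant, mirroring the old "if (!tbl->it_userspace)" guard: the
 * teardown only runs when the pointer is already NULL, so a populated view
 * is never released. */
static void table_free_buggy(struct table *t)
{
    if (!t->userspace_view) {
        free(t->userspace_view);        /* no-op: pointer is NULL here */
        t->userspace_view = NULL;
    }
}

/* Fixed variant, mirroring the new guard: free the view when it exists. */
static void table_free_fixed(struct table *t)
{
    if (t->userspace_view) {
        free(t->userspace_view);
        t->userspace_view = NULL;
    }
}

int main(void)
{
    struct table a = { .userspace_view = calloc(16, sizeof(unsigned long)) };
    struct table b = { .userspace_view = calloc(16, sizeof(unsigned long)) };

    table_free_buggy(&a);               /* leaks a.userspace_view */
    table_free_fixed(&b);               /* releases b.userspace_view */

    printf("buggy variant left %p allocated\n", (void *)a.userspace_view);
    return 0;
}
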

@@ -1287,17 +1288,13 @@ static LIST_HEAD(failed_ddw_pdn_list);

static phys_addr_t ddw_memory_hotplug_max(void)
{
resource_size_t max_addr = memory_hotplug_max();
struct device_node *memory;
resource_size_t max_addr;

for_each_node_by_type(memory, "memory") {
struct resource res;

if (of_address_to_resource(memory, 0, &res))
continue;

max_addr = max_t(resource_size_t, max_addr, res.end + 1);
}
#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
max_addr = hot_add_drconf_memory_max();
#else
max_addr = memblock_end_of_DRAM();
#endif

return max_addr;
}
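ddw_memory_hotplug_max() now returns the highest address that could ever become online RAM: hot_add_drconf_memory_max() when NUMA and memory hotplug are configured, otherwise the current end of DRAM. enable_ddw() turns that bound into max_ram_len = order_base_2(ddw_memory_hotplug_max()), the log2 of the span a direct-mapped window must cover. A small user-space sketch of that sizing step, with a toy order_base_2() and an assumed 512 GiB bound:

#include <stdio.h>

/* Toy stand-in for the kernel's order_base_2(): smallest k with (1 << k) >= n. */
static unsigned int order_base_2(unsigned long long n)
{
    unsigned int k = 0;

    while (k < 64 && (1ULL << k) < n)
        k++;
    return k;
}

int main(void)
{
    /* Assumption: hotpluggable RAM can reach up to 512 GiB on this system. */
    unsigned long long max_addr = 512ULL << 30;
    unsigned int max_ram_len = order_base_2(max_addr);

    /* A direct-mapped window must span 1 << max_ram_len bytes to cover it all. */
    printf("max_addr = %#llx -> max_ram_len = %u (window = %llu GiB)\n",
           max_addr, max_ram_len, (1ULL << max_ram_len) >> 30);
    return 0;
}
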
@@ -1334,6 +1331,54 @@ static void reset_dma_window(struct pci_dev *dev, struct device_node *par_dn)
ret);
}

/*
* Platforms support placing the PHB in limited address mode starting with
* LoPAR level 2.13. In this mode, the DMA address returned by DDW is above
* 4GB but less than 64 bits. This benefits IO adapters that don't support
* 64-bit DMA addresses.
*/
static int limited_dma_window(struct pci_dev *dev, struct device_node *par_dn)
{
int ret;
u32 cfg_addr, reset_dma_win, las_supported;
u64 buid;
struct device_node *dn;
struct pci_dn *pdn;

ret = ddw_read_ext(par_dn, DDW_EXT_RESET_DMA_WIN, &reset_dma_win);
if (ret)
goto out;

ret = ddw_read_ext(par_dn, DDW_EXT_LIMITED_ADDR_MODE, &las_supported);

/* Limited Address Space extension available on the platform, but DDW in
* limited addressing mode is not supported
*/
if (!ret && !las_supported)
ret = -EPROTO;

if (ret) {
dev_info(&dev->dev, "Limited Address Space for DDW not Supported, err: %d", ret);
goto out;
}

dn = pci_device_to_OF_node(dev);
pdn = PCI_DN(dn);
buid = pdn->phb->buid;
cfg_addr = (pdn->busno << 16) | (pdn->devfn << 8);

ret = rtas_call(reset_dma_win, 4, 1, NULL, cfg_addr, BUID_HI(buid),
BUID_LO(buid), 1);
if (ret)
dev_info(&dev->dev,
"ibm,reset-pe-dma-windows(%x) for Limited Addr Support: %x %x %x returned %d ",
reset_dma_win, cfg_addr, BUID_HI(buid), BUID_LO(buid),
ret);

out:
return ret;
}

/* Return largest page shift based on "IO Page Sizes" output of ibm,query-pe-dma-window. */
static int iommu_get_page_shift(u32 query_page_size)
{
@@ -1401,7 +1446,7 @@ static struct property *ddw_property_create(const char *propname, u32 liobn, u64
*
* returns true if it can map all pages (direct mapping), false otherwise.
*/
static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn, u64 dma_mask)
{
int len = 0, ret;
int max_ram_len = order_base_2(ddw_memory_hotplug_max());
@@ -1420,6 +1465,9 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
bool pmem_present;
struct pci_dn *pci = PCI_DN(pdn);
struct property *default_win = NULL;
bool limited_addr_req = false, limited_addr_enabled = false;
int dev_max_ddw;
int ddw_sz;

dn = of_find_node_by_type(NULL, "ibm,pmemory");
pmem_present = dn != NULL;
@@ -1446,7 +1494,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
* the ibm,ddw-applicable property holds the tokens for:
* ibm,query-pe-dma-window
* ibm,create-pe-dma-window
* ibm,remove-pe-dma-window
* for the given node in that order.
* the property is actually in the parent, not the PE
*/
@@ -1466,6 +1513,20 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
if (ret != 0)
goto out_failed;

/* DMA Limited Addressing required? This is when the driver has
* requested to create DDW but supports a mask which is less than 64 bits
*/
limited_addr_req = (dma_mask != DMA_BIT_MASK(64));

/* place the PHB in Limited Addressing mode */
if (limited_addr_req) {
if (limited_dma_window(dev, pdn))
goto out_failed;

/* PHB is in Limited address mode */
limited_addr_enabled = true;
}

/*
* If there is no window available, remove the default DMA window,
* if it's present. This will make all the resources available to the
@@ -1512,6 +1573,15 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
goto out_failed;
}

/* Maximum DMA window size that the device can address (in log2) */
dev_max_ddw = fls64(dma_mask);

/* If the device DMA mask is less than 64-bits, make sure the DMA window
* size is not bigger than what the device can access
*/
ddw_sz = min(order_base_2(query.largest_available_block << page_shift),
dev_max_ddw);

/*
* The "ibm,pmemory" can appear anywhere in the address space.
* Assuming it is still backed by page structs, try MAX_PHYSMEM_BITS
@@ -1520,23 +1590,21 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
*/
len = max_ram_len;
if (pmem_present) {
if (query.largest_available_block >=
(1ULL << (MAX_PHYSMEM_BITS - page_shift)))
if (ddw_sz >= MAX_PHYSMEM_BITS)
len = MAX_PHYSMEM_BITS;
else
dev_info(&dev->dev, "Skipping ibm,pmemory");
}

/* check if the available block * number of ptes will map everything */
if (query.largest_available_block < (1ULL << (len - page_shift))) {
if (ddw_sz < len) {
dev_dbg(&dev->dev,
"can't map partition max 0x%llx with %llu %llu-sized pages\n",
1ULL << len,
query.largest_available_block,
1ULL << page_shift);

len = order_base_2(query.largest_available_block << page_shift);

len = ddw_sz;
dynamic_mapping = true;
} else {
direct_mapping = !default_win_removed ||
@@ -1550,8 +1618,9 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
*/
if (default_win_removed && pmem_present && !direct_mapping) {
/* DDW is big enough to be split */
if ((query.largest_available_block << page_shift) >=
MIN_DDW_VPMEM_DMA_WINDOW + (1ULL << max_ram_len)) {
if ((1ULL << ddw_sz) >=
MIN_DDW_VPMEM_DMA_WINDOW + (1ULL << max_ram_len)) {

direct_mapping = true;

/* offset of the Dynamic part of DDW */
@@ -1562,8 +1631,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
dynamic_mapping = true;

/* create max size DDW possible */
len = order_base_2(query.largest_available_block
<< page_shift);
len = ddw_sz;
}
}

@@ -1603,7 +1671,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)

if (direct_mapping) {
/* DDW maps the whole partition, so enable direct DMA mapping */
ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
ret = walk_system_ram_range(0, ddw_memory_hotplug_max() >> PAGE_SHIFT,
win64->value, tce_setrange_multi_pSeriesLP_walk);
if (ret) {
dev_info(&dev->dev, "failed to map DMA window for %pOF: %d\n",
@@ -1691,7 +1759,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
__remove_dma_window(pdn, ddw_avail, create.liobn);

out_failed:
if (default_win_removed)
if (default_win_removed || limited_addr_enabled)
reset_dma_window(dev, pdn);

fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
@@ -1710,6 +1778,9 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset +
(1ULL << max_ram_len);

dev_info(&dev->dev, "lsa_required: %x, lsa_enabled: %x, direct mapping: %x\n",
limited_addr_req, limited_addr_enabled, direct_mapping);

return direct_mapping;
}
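The new sizing logic inside enable_ddw() works as follows: dev_max_ddw = fls64(dma_mask) is the highest address bit the device can drive, and ddw_sz clamps the window's log2 size to the smaller of what the platform offers (order_base_2(largest_available_block << page_shift)) and what the device can address. A stand-alone sketch of that clamp, with toy versions of fls64() and order_base_2() and assumed query values:

#include <stdio.h>

/* Toy fls64(): 1-based index of the most significant set bit, 0 for 0. */
static unsigned int toy_fls64(unsigned long long x)
{
    unsigned int n = 0;

    while (x) {
        x >>= 1;
        n++;
    }
    return n;
}

/* Toy order_base_2(): smallest k with (1 << k) >= n. */
static unsigned int toy_order_base_2(unsigned long long n)
{
    unsigned int k = 0;

    while (k < 64 && (1ULL << k) < n)
        k++;
    return k;
}

int main(void)
{
    /* Assumed values: an adapter that drives 36 address bits, 64 KiB IOMMU
     * pages, and 2^25 TCEs reported by ibm,query-pe-dma-window. */
    unsigned long long dma_mask = (1ULL << 36) - 1;             /* DMA_BIT_MASK(36) */
    unsigned long long largest_available_block = 1ULL << 25;    /* in IOMMU pages */
    unsigned int page_shift = 16;

    unsigned int dev_max_ddw = toy_fls64(dma_mask);
    unsigned int platform_sz = toy_order_base_2(largest_available_block << page_shift);
    unsigned int ddw_sz = platform_sz < dev_max_ddw ? platform_sz : dev_max_ddw;

    /* The platform could hand out a 2^41-byte window, but the adapter only
     * addresses 2^36 bytes, so the window is clamped to 2^36. */
    printf("dev_max_ddw=%u platform=%u -> ddw_sz=%u\n",
           dev_max_ddw, platform_sz, ddw_sz);
    return 0;
}
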

@@ -1835,8 +1906,11 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
{
struct device_node *dn = pci_device_to_OF_node(pdev), *pdn;

/* only attempt to use a new window if 64-bit DMA is requested */
if (dma_mask < DMA_BIT_MASK(64))
/* For DDW, DMA mask should be more than 32 bits. For a mask more than
* 32 bits but less than 64 bits, DMA addressing is supported in
* Limited Addressing mode.
*/
if (dma_mask <= DMA_BIT_MASK(32))
return false;

dev_dbg(&pdev->dev, "node is %pOF\n", dn);
@@ -1849,7 +1923,7 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
*/
pdn = pci_dma_find(dn, NULL);
if (pdn && PCI_DN(pdn))
return enable_ddw(pdev, pdn);
return enable_ddw(pdev, pdn, dma_mask);

return false;
}
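The reworked gate above splits devices into three regimes by DMA mask: at or below 32 bits no DDW is attempted, strictly between 32 and 64 bits the window is created with the PHB in Limited Addressing mode, and a full 64-bit mask keeps the original direct-mapping path. A small sketch of that decision, using the DMA_BIT_MASK() definition from linux/dma-mapping.h:

#include <stdio.h>

/* Same definition the kernel uses in include/linux/dma-mapping.h. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

enum ddw_mode { DDW_NONE, DDW_LIMITED, DDW_FULL };

/* Mirrors the new gate plus the limited_addr_req test in enable_ddw():
 * <= 32 bits: no DDW; above 32 but below 64 bits: limited addressing;
 * exactly 64 bits: the pre-existing direct-mapping path. */
static enum ddw_mode classify(unsigned long long dma_mask)
{
    if (dma_mask <= DMA_BIT_MASK(32))
        return DDW_NONE;
    if (dma_mask != DMA_BIT_MASK(64))
        return DDW_LIMITED;
    return DDW_FULL;
}

int main(void)
{
    static const char *const name[] = { "no DDW", "limited addressing", "full 64-bit DDW" };
    unsigned int bits[] = { 32, 40, 48, 64 };

    for (unsigned int i = 0; i < sizeof(bits) / sizeof(bits[0]); i++)
        printf("%2u-bit mask -> %s\n", bits[i], name[classify(DMA_BIT_MASK(bits[i]))]);
    return 0;
}
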
@@ -2349,11 +2423,17 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
struct memory_notify *arg = data;
int ret = 0;

/* This notifier can get called when onlining persistent memory as well.
* TCEs are not pre-mapped for persistent memory. Persistent memory will
* always be above ddw_memory_hotplug_max()
*/

switch (action) {
case MEM_GOING_ONLINE:
spin_lock(&dma_win_list_lock);
list_for_each_entry(window, &dma_win_list, list) {
if (window->direct) {
if (window->direct && (arg->start_pfn << PAGE_SHIFT) <
ddw_memory_hotplug_max()) {
ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
arg->nr_pages, window->prop);
}
@@ -2365,7 +2445,8 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
case MEM_OFFLINE:
spin_lock(&dma_win_list_lock);
list_for_each_entry(window, &dma_win_list, list) {
if (window->direct) {
if (window->direct && (arg->start_pfn << PAGE_SHIFT) <
ddw_memory_hotplug_max()) {
ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
arg->nr_pages, window->prop);
}
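The memory-notifier hunks add the same bound on both the online and offline paths: a hot-added range is mapped into (or cleared from) the direct window only if its start address, start_pfn << PAGE_SHIFT, lies below ddw_memory_hotplug_max(); persistent memory always sits above that bound, so it is skipped. A user-space sketch of the pfn-to-address check, with an assumed 64 KiB page size and an assumed bound:

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SHIFT 16           /* assumed 64 KiB pages, typical for pseries */

/* Mirrors the added check: a hot-added range gets TCEs in the direct window
 * only if it starts below the hotplug bound. */
static bool range_needs_tces(unsigned long long start_pfn, unsigned long long hotplug_max)
{
    return (start_pfn << PAGE_SHIFT) < hotplug_max;
}

int main(void)
{
    unsigned long long hotplug_max = 512ULL << 30;      /* assumed 512 GiB bound */

    /* Ordinary hot-added RAM at 256 GiB: below the bound, so it is mapped. */
    unsigned long long ram_pfn = (256ULL << 30) >> PAGE_SHIFT;
    /* Persistent memory placed at 1 TiB: above the bound, so it is skipped. */
    unsigned long long pmem_pfn = (1024ULL << 30) >> PAGE_SHIFT;

    printf("RAM  at 256 GiB -> %s\n", range_needs_tces(ram_pfn, hotplug_max) ? "map" : "skip");
    printf("pmem at 1 TiB   -> %s\n", range_needs_tces(pmem_pfn, hotplug_max) ? "map" : "skip");
    return 0;
}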