enum {
	DDW_EXT_SIZE = 0,
	DDW_EXT_RESET_DMA_WIN = 1,
-	DDW_EXT_QUERY_OUT_SIZE = 2
+	DDW_EXT_QUERY_OUT_SIZE = 2,
+	DDW_EXT_LIMITED_ADDR_MODE = 3
};

static struct iommu_table *iommu_pseries_alloc_table(int node)
@@ -1330,6 +1331,54 @@ static void reset_dma_window(struct pci_dev *dev, struct device_node *par_dn)
			ret);
}

+/*
+ * Platforms support placing the PHB in limited address mode starting with
+ * LoPAR level 2.13. In this mode, the DMA address returned by DDW is above
+ * 4GB but below 64 bits. This benefits IO adapters that don't support 64-bit
+ * DMA addresses.
+ */
+static int limited_dma_window(struct pci_dev *dev, struct device_node *par_dn)
+{
+	int ret;
+	u32 cfg_addr, reset_dma_win, las_supported;
+	u64 buid;
+	struct device_node *dn;
+	struct pci_dn *pdn;
+
+	ret = ddw_read_ext(par_dn, DDW_EXT_RESET_DMA_WIN, &reset_dma_win);
+	if (ret)
+		goto out;
+
+	ret = ddw_read_ext(par_dn, DDW_EXT_LIMITED_ADDR_MODE, &las_supported);
+
+	/* The Limited Address Space extension is available on the platform,
+	 * but DDW in limited addressing mode is not supported.
+	 */
+	if (!ret && !las_supported)
+		ret = -EPROTO;
+
+	if (ret) {
+		dev_info(&dev->dev, "Limited Address Space for DDW not Supported, err: %d", ret);
+		goto out;
+	}
+
+	dn = pci_device_to_OF_node(dev);
+	pdn = PCI_DN(dn);
+	buid = pdn->phb->buid;
+	cfg_addr = (pdn->busno << 16) | (pdn->devfn << 8);
+
+	ret = rtas_call(reset_dma_win, 4, 1, NULL, cfg_addr, BUID_HI(buid),
+			BUID_LO(buid), 1);
+	if (ret)
+		dev_info(&dev->dev,
+			 "ibm,reset-pe-dma-windows(%x) for Limited Addr Support: %x %x %x returned %d ",
+			 reset_dma_win, cfg_addr, BUID_HI(buid), BUID_LO(buid),
+			 ret);
+
+out:
+	return ret;
+}
+
/* Return largest page shift based on "IO Page Sizes" output of ibm,query-pe-dma-window. */
static int iommu_get_page_shift(u32 query_page_size)
{
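For context (not part of this patch): the ddw_read_ext() helper that limited_dma_window() relies on already exists in this file and is assumed to look roughly like the sketch below. It indexes into the parent node's "ibm,ddw-extensions" property, where index 0 holds the number of extension entries, which is why DDW_EXT_LIMITED_ADDR_MODE is simply the next index (3). Details may differ from the in-tree implementation.

/* Sketch of the assumed helper; the real code lives elsewhere in iommu.c. */
static int ddw_read_ext(const struct device_node *np, int extnum, u32 *value)
{
	static const char propname[] = "ibm,ddw-extensions";
	u32 count;
	int ret;

	/* Index 0 is the number of extension entries that follow. */
	ret = of_property_read_u32_index(np, propname, DDW_EXT_SIZE, &count);
	if (ret)
		return ret;

	/* The requested extension is not provided by this platform. */
	if (count < extnum)
		return -EOVERFLOW;

	return of_property_read_u32_index(np, propname, extnum, value);
}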
@@ -1397,7 +1446,7 @@ static struct property *ddw_property_create(const char *propname, u32 liobn, u64
 *
 * returns true if can map all pages (direct mapping), false otherwise..
 */
-static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn, u64 dma_mask)
{
	int len = 0, ret;
	int max_ram_len = order_base_2(ddw_memory_hotplug_max());
@@ -1416,6 +1465,9 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
	bool pmem_present;
	struct pci_dn *pci = PCI_DN(pdn);
	struct property *default_win = NULL;
+	bool limited_addr_req = false, limited_addr_enabled = false;
+	int dev_max_ddw;
+	int ddw_sz;

	dn = of_find_node_by_type(NULL, "ibm,pmemory");
	pmem_present = dn != NULL;
@@ -1442,7 +1494,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
	 * the ibm,ddw-applicable property holds the tokens for:
	 * ibm,query-pe-dma-window
	 * ibm,create-pe-dma-window
-	 * ibm,remove-pe-dma-window
	 * for the given node in that order.
	 * the property is actually in the parent, not the PE
	 */
@@ -1462,6 +1513,20 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
	if (ret != 0)
		goto out_failed;

+	/* Is DMA Limited Addressing required? This is the case when the driver
+	 * has requested a DDW but supports a mask of less than 64 bits.
+	 */
+	limited_addr_req = (dma_mask != DMA_BIT_MASK(64));
+
+	/* place the PHB in Limited Addressing mode */
+	if (limited_addr_req) {
+		if (limited_dma_window(dev, pdn))
+			goto out_failed;
+
+		/* PHB is in Limited address mode */
+		limited_addr_enabled = true;
+	}
+
	/*
	 * If there is no window available, remove the default DMA window,
	 * if it's present. This will make all the resources available to the
@@ -1508,6 +1573,15 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
		goto out_failed;
	}

+	/* Maximum DMA window size that the device can address (in log2) */
+	dev_max_ddw = fls64(dma_mask);
+
+	/* If the device DMA mask is less than 64 bits, make sure the DMA window
+	 * size is not bigger than what the device can access.
+	 */
+	ddw_sz = min(order_base_2(query.largest_available_block << page_shift),
+			dev_max_ddw);
+
	/*
	 * The "ibm,pmemory" can appear anywhere in the address space.
	 * Assuming it is still backed by page structs, try MAX_PHYSMEM_BITS
@@ -1516,23 +1590,21 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
	 */
	len = max_ram_len;
	if (pmem_present) {
-		if (query.largest_available_block >=
-		    (1ULL << (MAX_PHYSMEM_BITS - page_shift)))
+		if (ddw_sz >= MAX_PHYSMEM_BITS)
			len = MAX_PHYSMEM_BITS;
		else
			dev_info(&dev->dev, "Skipping ibm,pmemory");
	}

	/* check if the available block * number of ptes will map everything */
-	if (query.largest_available_block < (1ULL << (len - page_shift))) {
+	if (ddw_sz < len) {
		dev_dbg(&dev->dev,
			"can't map partition max 0x%llx with %llu %llu-sized pages\n",
			1ULL << len,
			query.largest_available_block,
			1ULL << page_shift);

-		len = order_base_2(query.largest_available_block << page_shift);
-
+		len = ddw_sz;
		dynamic_mapping = true;
	} else {
		direct_mapping = !default_win_removed ||
@@ -1546,8 +1618,9 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
	 */
	if (default_win_removed && pmem_present && !direct_mapping) {
		/* DDW is big enough to be split */
-		if ((query.largest_available_block << page_shift) >=
-		    MIN_DDW_VPMEM_DMA_WINDOW + (1ULL << max_ram_len)) {
+		if ((1ULL << ddw_sz) >=
+		    MIN_DDW_VPMEM_DMA_WINDOW + (1ULL << max_ram_len)) {
+
			direct_mapping = true;

			/* offset of the Dynamic part of DDW */
@@ -1558,8 +1631,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
			dynamic_mapping = true;

			/* create max size DDW possible */
-			len = order_base_2(query.largest_available_block
-					   << page_shift);
+			len = ddw_sz;
		}
	}
@@ -1687,7 +1759,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
		__remove_dma_window(pdn, ddw_avail, create.liobn);

out_failed:
-	if (default_win_removed)
+	if (default_win_removed || limited_addr_enabled)
		reset_dma_window(dev, pdn);

	fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
@@ -1706,6 +1778,9 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
		dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset +
						(1ULL << max_ram_len);

+	dev_info(&dev->dev, "lsa_required: %x, lsa_enabled: %x, direct mapping: %x\n",
+			limited_addr_req, limited_addr_enabled, direct_mapping);
+
	return direct_mapping;
}
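For illustration only, here is a small userspace sketch of the window-size clamp introduced above. fls64() and order_base_2() are reimplemented locally so the numbers can be checked outside the kernel, and the example values (a 48-bit DMA mask, a 2^34-entry largest block, 64K IOMMU pages) are made up, not taken from this patch.

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins for the kernel helpers used by enable_ddw(). */
static int fls64_demo(uint64_t x)
{
	int bit = 0;

	while (x) {
		bit++;
		x >>= 1;
	}
	return bit;		/* position of the highest set bit, 1-based */
}

static int order_base_2_demo(uint64_t x)
{
	int order = 0;

	while ((1ULL << order) < x)
		order++;
	return order;		/* smallest order with 2^order >= x */
}

int main(void)
{
	uint64_t dma_mask = (1ULL << 48) - 1;		/* hypothetical 48-bit device */
	uint64_t largest_available_block = 1ULL << 34;	/* hypothetical query result, in pages */
	unsigned int page_shift = 16;			/* 64K IOMMU pages */

	int dev_max_ddw = fls64_demo(dma_mask);		/* 48 */
	int ddw_sz = order_base_2_demo(largest_available_block << page_shift); /* 50 */

	if (ddw_sz > dev_max_ddw)
		ddw_sz = dev_max_ddw;	/* clamp window to what the device can address */

	printf("dev_max_ddw=%d ddw_sz=%d\n", dev_max_ddw, ddw_sz);	/* prints 48 48 */
	return 0;
}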
@@ -1831,8 +1906,11 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
{
	struct device_node *dn = pci_device_to_OF_node(pdev), *pdn;

-	/* only attempt to use a new window if 64-bit DMA is requested */
-	if (dma_mask < DMA_BIT_MASK(64))
+	/* For DDW, the DMA mask should be more than 32 bits. For a mask larger
+	 * than 32 bits but smaller than 64 bits, DMA addressing is supported in
+	 * Limited Addressing mode.
+	 */
+	if (dma_mask <= DMA_BIT_MASK(32))
		return false;

	dev_dbg(&pdev->dev, "node is %pOF\n", dn);
@@ -1845,7 +1923,7 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
	 */
	pdn = pci_dma_find(dn, NULL);
	if (pdn && PCI_DN(pdn))
-		return enable_ddw(pdev, pdn);
+		return enable_ddw(pdev, pdn, dma_mask);

	return false;
}
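As a usage note (not part of this patch): with this change, a PCI driver that sets a DMA mask between 33 and 63 bits can end up with a dynamic DMA window in limited address mode instead of being restricted to the default 32-bit window. A minimal, assumed probe fragment; the function name and the 48-bit value are made-up examples, only dma_set_mask_and_coherent() is standard API:

/* Hypothetical driver code, for illustration only. */
static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;

	/* Device supports 48-bit DMA addressing (example value). */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	if (rc)
		return rc;

	/* ... rest of probe ... */
	return 0;
}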