@@ -96,16 +96,15 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
  */
 static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
 {
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	/* free ring buffers and the ring itself */
 	idpf_ctlq_dealloc_ring_res(hw, cq);
 
 	/* Set ring_size to 0 to indicate uninitialized queue */
 	cq->ring_size = 0;
 
-	mutex_unlock(&cq->cq_lock);
-	mutex_destroy(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 }
 
 /**
@@ -173,7 +172,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
 
 	idpf_ctlq_init_regs(hw, cq, is_rxq);
 
-	mutex_init(&cq->cq_lock);
+	spin_lock_init(&cq->cq_lock);
 
 	list_add(&cq->cq_list, &hw->cq_list_head);
 
@@ -272,7 +271,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	int err = 0;
 	int i;
 
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	/* Ensure there are enough descriptors to send all messages */
 	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
@@ -332,7 +331,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	wr32(hw, cq->reg.tail, cq->next_to_use);
 
 err_unlock:
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 
 	return err;
 }
@@ -364,7 +363,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
 	if (*clean_count > cq->ring_size)
 		return -EBADR;
 
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	ntc = cq->next_to_clean;
 
@@ -394,7 +393,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
 
 	cq->next_to_clean = ntc;
 
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 
 	/* Return number of descriptors actually cleaned */
 	*clean_count = i;
@@ -432,7 +431,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	if (*buff_count > 0)
 		buffs_avail = true;
 
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	if (tbp >= cq->ring_size)
 		tbp = 0;
@@ -521,7 +520,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 		wr32(hw, cq->reg.tail, cq->next_to_post);
 	}
 
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 
 	/* return the number of buffers that were not posted */
 	*buff_count = *buff_count - i;
@@ -549,7 +548,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
 	u16 i;
 
 	/* take the lock before we start messing with the ring */
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	ntc = cq->next_to_clean;
 
@@ -608,7 +607,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
 
 	cq->next_to_clean = ntc;
 
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 
 	*num_q_msg = i;
 	if (*num_q_msg == 0)
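Every hunk applies the same mechanical conversion: the control queue's cq_lock changes from a mutex to a spinlock, so mutex_init() becomes spin_lock_init(), each mutex_lock()/mutex_unlock() pair becomes spin_lock()/spin_unlock(), and the mutex_destroy() call in idpf_ctlq_shutdown() is dropped because a spinlock needs no teardown. A minimal sketch of that pattern, using a hypothetical my_ctlq structure rather than the driver's real struct idpf_ctlq_info:

#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical queue state standing in for struct idpf_ctlq_info. */
struct my_ctlq {
	spinlock_t lock;	/* protects the ring indices below */
	u16 next_to_use;
	u16 next_to_clean;
	u16 ring_size;
};

/* Done once at queue creation; unlike a mutex, no destroy call is
 * required when the queue is torn down.
 */
static void my_ctlq_init(struct my_ctlq *q, u16 ring_size)
{
	spin_lock_init(&q->lock);
	q->ring_size = ring_size;
	q->next_to_use = 0;
	q->next_to_clean = 0;
}

/* Each former mutex critical section keeps the same shape; only the
 * locking primitive changes, and spin_lock() never sleeps.
 */
static void my_ctlq_advance(struct my_ctlq *q)
{
	spin_lock(&q->lock);
	q->next_to_use = (q->next_to_use + 1) % q->ring_size;
	spin_unlock(&q->lock);
}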