OASIS Mailing List Archives. View the OASIS mailing list archive below,
or browse/search using MarkMail.

 


Help: OASIS Mailing Lists Help | MarkMail Help

virtio-dev message

[Date Prev] | [Thread Prev] | [Thread Next] | [Date Next] -- [Date Index] | [Thread Index] | [List Home]


Subject: [PATCH v4 2/6] virtio-vsock: avoid sleeping where not allowed


Some functions are called in af_vsock while the thread is prepared to wait.
Those functions are not allowed to sleep, but the virtio vsock backend
implementation does. Some issues can be solved by shrinking the area
influenced by the prepare_to_wait (addressed in subsequent patches).

This patch instead addresses the functions that are called to evaluate the
conditions that decide whether to actually perform the wait or not; they
therefore need to be called while the thread is prepared.  The issue here
was the mutex, which introduces sleeps; since all the critical sections are
small and simple, I replaced the mutexes with spinlocks.

Signed-off-by: Claudio Imbrenda <imbrenda@linux.vnet.ibm.com>
---
 include/linux/virtio_vsock.h            |  4 ++--
 net/vmw_vsock/virtio_transport_common.c | 39 +++++++++++++++++----------------
 2 files changed, 22 insertions(+), 21 deletions(-)

diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 4acf1ad..dbbd8a5 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -31,8 +31,8 @@ struct virtio_vsock_sock {
 	u32 buf_size_min;
 	u32 buf_size_max;
 
-	struct mutex tx_lock;
-	struct mutex rx_lock;
+	spinlock_t tx_lock;
+	spinlock_t rx_lock;
 
 	/* Protected by tx_lock */
 	u32 tx_cnt;
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 000f12b..eab1644 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -7,6 +7,7 @@
  *
  * This work is licensed under the terms of the GNU GPL, version 2.
  */
+#include <linux/spinlock.h>
 #include <linux/module.h>
 #include <linux/ctype.h>
 #include <linux/list.h>
@@ -105,10 +106,10 @@ static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
 
 void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt)
 {
-	mutex_lock(&vvs->tx_lock);
+	spin_lock(&vvs->tx_lock);
 	pkt->hdr.fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
 	pkt->hdr.buf_alloc = cpu_to_le32(vvs->buf_alloc);
-	mutex_unlock(&vvs->tx_lock);
+	spin_unlock(&vvs->tx_lock);
 }
 EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt);
 
@@ -116,12 +117,12 @@ u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 credit)
 {
 	u32 ret;
 
-	mutex_lock(&vvs->tx_lock);
+	spin_lock(&vvs->tx_lock);
 	ret = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
 	if (ret > credit)
 		ret = credit;
 	vvs->tx_cnt += ret;
-	mutex_unlock(&vvs->tx_lock);
+	spin_unlock(&vvs->tx_lock);
 
 	return ret;
 }
@@ -129,9 +130,9 @@ EXPORT_SYMBOL_GPL(virtio_transport_get_credit);
 
 void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit)
 {
-	mutex_lock(&vvs->tx_lock);
+	spin_lock(&vvs->tx_lock);
 	vvs->tx_cnt -= credit;
-	mutex_unlock(&vvs->tx_lock);
+	spin_unlock(&vvs->tx_lock);
 }
 EXPORT_SYMBOL_GPL(virtio_transport_put_credit);
 
@@ -157,7 +158,7 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
 	size_t bytes, total = 0;
 	int err = -EFAULT;
 
-	mutex_lock(&vvs->rx_lock);
+	spin_lock(&vvs->rx_lock);
 	while (total < len &&
 	       vvs->rx_bytes > 0 &&
 	       !list_empty(&vvs->rx_queue)) {
@@ -179,7 +180,7 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
 			virtio_transport_free_pkt(pkt);
 		}
 	}
-	mutex_unlock(&vvs->rx_lock);
+	spin_unlock(&vvs->rx_lock);
 
 	/* Send a credit pkt to peer */
 	virtio_transport_send_credit_update(vsk, VIRTIO_VSOCK_TYPE_STREAM,
@@ -188,7 +189,7 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
 	return total;
 
 out:
-	mutex_unlock(&vvs->rx_lock);
+	spin_unlock(&vvs->rx_lock);
 	if (total)
 		err = total;
 	return err;
@@ -220,9 +221,9 @@ s64 virtio_transport_stream_has_data(struct vsock_sock *vsk)
 	struct virtio_vsock_sock *vvs = vsk->trans;
 	s64 bytes;
 
-	mutex_lock(&vvs->rx_lock);
+	spin_lock(&vvs->rx_lock);
 	bytes = vvs->rx_bytes;
-	mutex_unlock(&vvs->rx_lock);
+	spin_unlock(&vvs->rx_lock);
 
 	return bytes;
 }
@@ -245,9 +246,9 @@ s64 virtio_transport_stream_has_space(struct vsock_sock *vsk)
 	struct virtio_vsock_sock *vvs = vsk->trans;
 	s64 bytes;
 
-	mutex_lock(&vvs->tx_lock);
+	spin_lock(&vvs->tx_lock);
 	bytes = virtio_transport_has_space(vsk);
-	mutex_unlock(&vvs->tx_lock);
+	spin_unlock(&vvs->tx_lock);
 
 	return bytes;
 }
@@ -279,8 +280,8 @@ int virtio_transport_do_socket_init(struct vsock_sock *vsk,
 
 	vvs->buf_alloc = vvs->buf_size;
 
-	mutex_init(&vvs->rx_lock);
-	mutex_init(&vvs->tx_lock);
+	spin_lock_init(&vvs->rx_lock);
+	spin_lock_init(&vvs->tx_lock);
 	INIT_LIST_HEAD(&vvs->rx_queue);
 
 	return 0;
@@ -632,10 +633,10 @@ virtio_transport_recv_connected(struct sock *sk,
 		pkt->len = le32_to_cpu(pkt->hdr.len);
 		pkt->off = 0;
 
-		mutex_lock(&vvs->rx_lock);
+		spin_lock(&vvs->rx_lock);
 		virtio_transport_inc_rx_pkt(vvs, pkt);
 		list_add_tail(&pkt->list, &vvs->rx_queue);
-		mutex_unlock(&vvs->rx_lock);
+		spin_unlock(&vvs->rx_lock);
 
 		sk->sk_data_ready(sk);
 		return err;
@@ -738,11 +739,11 @@ static void virtio_transport_space_update(struct sock *sk,
 	bool space_available;
 
 	/* buf_alloc and fwd_cnt is always included in the hdr */
-	mutex_lock(&vvs->tx_lock);
+	spin_lock(&vvs->tx_lock);
 	vvs->peer_buf_alloc = le32_to_cpu(pkt->hdr.buf_alloc);
 	vvs->peer_fwd_cnt = le32_to_cpu(pkt->hdr.fwd_cnt);
 	space_available = virtio_transport_has_space(vsk);
-	mutex_unlock(&vvs->tx_lock);
+	spin_unlock(&vvs->tx_lock);
 
 	if (space_available)
 		sk->sk_write_space(sk);
-- 
1.9.1



[Date Prev] | [Thread Prev] | [Thread Next] | [Date Next] -- [Date Index] | [Thread Index] | [List Home]