path: root/drivers/staging/lustre
author    Lisa Nguyen <lisa@xenapiadmin.com>	2013-10-18 02:11:44 -0700
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-10-19 13:35:27 -0700
commit    af66a6e29c5ed5032ae0678702d5531f954beb39 (patch)
tree      b8e2462944789d6ed0886972ec45b3f50485a7db /drivers/staging/lustre
parent    5f849bb0e4e3e64e85151efdf8b7d96338156b1f (diff)
download  blackbird-op-linux-af66a6e29c5ed5032ae0678702d5531f954beb39.tar.gz
          blackbird-op-linux-af66a6e29c5ed5032ae0678702d5531f954beb39.zip
staging/lustre/lnet: Remove unnecessary whitespace in lib-move.c
Remove unnecessary whitespace around open parentheses in lib-move.c to meet kernel coding standards.

Signed-off-by: Lisa Nguyen <lisa@xenapiadmin.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
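For context, a minimal before/after sketch of the style rule this patch applies, using lines taken from the diff below: kernel coding style (as checked by scripts/checkpatch.pl) prohibits a space between a function name and its opening parenthesis.

    /* before: space between function name and '(' */
    LASSERT (the_lnet.ln_init);
    tp = list_entry (el, lnet_test_peer_t, tp_list);

    /* after: no space, per kernel coding style */
    LASSERT(the_lnet.ln_init);
    tp = list_entry(el, lnet_test_peer_t, tp_list);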
Diffstat (limited to 'drivers/staging/lustre')
-rw-r--r--   drivers/staging/lustre/lnet/lnet/lib-move.c   294
1 file changed, 147 insertions(+), 147 deletions(-)
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index 49b0f1287a69..1b05108d9c3a 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -47,14 +47,14 @@ CFS_MODULE_PARM(local_nid_dist_zero, "i", int, 0444,
"Reserved");
int
-lnet_fail_nid (lnet_nid_t nid, unsigned int threshold)
+lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
{
lnet_test_peer_t *tp;
struct list_head *el;
struct list_head *next;
struct list_head cull;
- LASSERT (the_lnet.ln_init);
+ LASSERT(the_lnet.ln_init);
/* NB: use lnet_net_lock(0) to serialize operations on test peers */
if (threshold != 0) {
@@ -77,31 +77,31 @@ lnet_fail_nid (lnet_nid_t nid, unsigned int threshold)
lnet_net_lock(0);
- list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
- tp = list_entry (el, lnet_test_peer_t, tp_list);
+ list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
+ tp = list_entry(el, lnet_test_peer_t, tp_list);
if (tp->tp_threshold == 0 || /* needs culling anyway */
nid == LNET_NID_ANY || /* removing all entries */
tp->tp_nid == nid) /* matched this one */
{
- list_del (&tp->tp_list);
- list_add (&tp->tp_list, &cull);
+ list_del(&tp->tp_list);
+ list_add(&tp->tp_list, &cull);
}
}
lnet_net_unlock(0);
- while (!list_empty (&cull)) {
- tp = list_entry (cull.next, lnet_test_peer_t, tp_list);
+ while (!list_empty(&cull)) {
+ tp = list_entry(cull.next, lnet_test_peer_t, tp_list);
- list_del (&tp->tp_list);
- LIBCFS_FREE(tp, sizeof (*tp));
+ list_del(&tp->tp_list);
+ LIBCFS_FREE(tp, sizeof(*tp));
}
return 0;
}
static int
-fail_peer (lnet_nid_t nid, int outgoing)
+fail_peer(lnet_nid_t nid, int outgoing)
{
lnet_test_peer_t *tp;
struct list_head *el;
@@ -109,13 +109,13 @@ fail_peer (lnet_nid_t nid, int outgoing)
struct list_head cull;
int fail = 0;
- INIT_LIST_HEAD (&cull);
+ INIT_LIST_HEAD(&cull);
/* NB: use lnet_net_lock(0) to serialize operations on test peers */
lnet_net_lock(0);
- list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
- tp = list_entry (el, lnet_test_peer_t, tp_list);
+ list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
+ tp = list_entry(el, lnet_test_peer_t, tp_list);
if (tp->tp_threshold == 0) {
/* zombie entry */
@@ -123,8 +123,8 @@ fail_peer (lnet_nid_t nid, int outgoing)
/* only cull zombies on outgoing tests,
* since we may be at interrupt priority on
* incoming messages. */
- list_del (&tp->tp_list);
- list_add (&tp->tp_list, &cull);
+ list_del(&tp->tp_list);
+ list_add(&tp->tp_list, &cull);
}
continue;
}
@@ -138,8 +138,8 @@ fail_peer (lnet_nid_t nid, int outgoing)
if (outgoing &&
tp->tp_threshold == 0) {
/* see above */
- list_del (&tp->tp_list);
- list_add (&tp->tp_list, &cull);
+ list_del(&tp->tp_list);
+ list_add(&tp->tp_list, &cull);
}
}
break;
@@ -148,18 +148,18 @@ fail_peer (lnet_nid_t nid, int outgoing)
lnet_net_unlock(0);
- while (!list_empty (&cull)) {
- tp = list_entry (cull.next, lnet_test_peer_t, tp_list);
- list_del (&tp->tp_list);
+ while (!list_empty(&cull)) {
+ tp = list_entry(cull.next, lnet_test_peer_t, tp_list);
+ list_del(&tp->tp_list);
- LIBCFS_FREE(tp, sizeof (*tp));
+ LIBCFS_FREE(tp, sizeof(*tp));
}
return (fail);
}
unsigned int
-lnet_iov_nob (unsigned int niov, struct iovec *iov)
+lnet_iov_nob(unsigned int niov, struct iovec *iov)
{
unsigned int nob = 0;
@@ -171,7 +171,7 @@ lnet_iov_nob (unsigned int niov, struct iovec *iov)
EXPORT_SYMBOL(lnet_iov_nob);
void
-lnet_copy_iov2iov (unsigned int ndiov, struct iovec *diov, unsigned int doffset,
+lnet_copy_iov2iov(unsigned int ndiov, struct iovec *diov, unsigned int doffset,
unsigned int nsiov, struct iovec *siov, unsigned int soffset,
unsigned int nob)
{
@@ -182,31 +182,31 @@ lnet_copy_iov2iov (unsigned int ndiov, struct iovec *diov, unsigned int doffset,
return;
/* skip complete frags before 'doffset' */
- LASSERT (ndiov > 0);
+ LASSERT(ndiov > 0);
while (doffset >= diov->iov_len) {
doffset -= diov->iov_len;
diov++;
ndiov--;
- LASSERT (ndiov > 0);
+ LASSERT(ndiov > 0);
}
/* skip complete frags before 'soffset' */
- LASSERT (nsiov > 0);
+ LASSERT(nsiov > 0);
while (soffset >= siov->iov_len) {
soffset -= siov->iov_len;
siov++;
nsiov--;
- LASSERT (nsiov > 0);
+ LASSERT(nsiov > 0);
}
do {
- LASSERT (ndiov > 0);
- LASSERT (nsiov > 0);
+ LASSERT(ndiov > 0);
+ LASSERT(nsiov > 0);
this_nob = MIN(diov->iov_len - doffset,
siov->iov_len - soffset);
this_nob = MIN(this_nob, nob);
- memcpy ((char *)diov->iov_base + doffset,
+ memcpy((char *)diov->iov_base + doffset,
(char *)siov->iov_base + soffset, this_nob);
nob -= this_nob;
@@ -230,7 +230,7 @@ lnet_copy_iov2iov (unsigned int ndiov, struct iovec *diov, unsigned int doffset,
EXPORT_SYMBOL(lnet_copy_iov2iov);
int
-lnet_extract_iov (int dst_niov, struct iovec *dst,
+lnet_extract_iov(int dst_niov, struct iovec *dst,
int src_niov, struct iovec *src,
unsigned int offset, unsigned int len)
{
@@ -243,18 +243,18 @@ lnet_extract_iov (int dst_niov, struct iovec *dst,
if (len == 0) /* no data => */
return (0); /* no frags */
- LASSERT (src_niov > 0);
+ LASSERT(src_niov > 0);
while (offset >= src->iov_len) { /* skip initial frags */
offset -= src->iov_len;
src_niov--;
src++;
- LASSERT (src_niov > 0);
+ LASSERT(src_niov > 0);
}
niov = 1;
for (;;) {
- LASSERT (src_niov > 0);
- LASSERT ((int)niov <= dst_niov);
+ LASSERT(src_niov > 0);
+ LASSERT((int)niov <= dst_niov);
frag_len = src->iov_len - offset;
dst->iov_base = ((char *)src->iov_base) + offset;
@@ -278,7 +278,7 @@ EXPORT_SYMBOL(lnet_extract_iov);
unsigned int
-lnet_kiov_nob (unsigned int niov, lnet_kiov_t *kiov)
+lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov)
{
unsigned int nob = 0;
@@ -290,7 +290,7 @@ lnet_kiov_nob (unsigned int niov, lnet_kiov_t *kiov)
EXPORT_SYMBOL(lnet_kiov_nob);
void
-lnet_copy_kiov2kiov (unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
+lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset,
unsigned int nob)
{
@@ -302,27 +302,27 @@ lnet_copy_kiov2kiov (unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset
if (nob == 0)
return;
- LASSERT (!in_interrupt ());
+ LASSERT(!in_interrupt());
- LASSERT (ndiov > 0);
+ LASSERT(ndiov > 0);
while (doffset >= diov->kiov_len) {
doffset -= diov->kiov_len;
diov++;
ndiov--;
- LASSERT (ndiov > 0);
+ LASSERT(ndiov > 0);
}
- LASSERT (nsiov > 0);
+ LASSERT(nsiov > 0);
while (soffset >= siov->kiov_len) {
soffset -= siov->kiov_len;
siov++;
nsiov--;
- LASSERT (nsiov > 0);
+ LASSERT(nsiov > 0);
}
do {
- LASSERT (ndiov > 0);
- LASSERT (nsiov > 0);
+ LASSERT(ndiov > 0);
+ LASSERT(nsiov > 0);
this_nob = MIN(diov->kiov_len - doffset,
siov->kiov_len - soffset);
this_nob = MIN(this_nob, nob);
@@ -338,7 +338,7 @@ lnet_copy_kiov2kiov (unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset
* However in practice at least one of the kiovs will be mapped
* kernel pages and the map/unmap will be NOOPs */
- memcpy (daddr, saddr, this_nob);
+ memcpy(daddr, saddr, this_nob);
nob -= this_nob;
if (diov->kiov_len > doffset + this_nob) {
@@ -372,7 +372,7 @@ lnet_copy_kiov2kiov (unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset
EXPORT_SYMBOL(lnet_copy_kiov2kiov);
void
-lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset,
+lnet_copy_kiov2iov(unsigned int niov, struct iovec *iov, unsigned int iovoffset,
unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
unsigned int nob)
{
@@ -383,27 +383,27 @@ lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset
if (nob == 0)
return;
- LASSERT (!in_interrupt ());
+ LASSERT(!in_interrupt());
- LASSERT (niov > 0);
+ LASSERT(niov > 0);
while (iovoffset >= iov->iov_len) {
iovoffset -= iov->iov_len;
iov++;
niov--;
- LASSERT (niov > 0);
+ LASSERT(niov > 0);
}
- LASSERT (nkiov > 0);
+ LASSERT(nkiov > 0);
while (kiovoffset >= kiov->kiov_len) {
kiovoffset -= kiov->kiov_len;
kiov++;
nkiov--;
- LASSERT (nkiov > 0);
+ LASSERT(nkiov > 0);
}
do {
- LASSERT (niov > 0);
- LASSERT (nkiov > 0);
+ LASSERT(niov > 0);
+ LASSERT(nkiov > 0);
this_nob = MIN(iov->iov_len - iovoffset,
kiov->kiov_len - kiovoffset);
this_nob = MIN(this_nob, nob);
@@ -412,7 +412,7 @@ lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset
addr = ((char *)kmap(kiov->kiov_page)) +
kiov->kiov_offset + kiovoffset;
- memcpy ((char *)iov->iov_base + iovoffset, addr, this_nob);
+ memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
nob -= this_nob;
if (iov->iov_len > iovoffset + this_nob) {
@@ -442,7 +442,7 @@ lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset
EXPORT_SYMBOL(lnet_copy_kiov2iov);
void
-lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
+lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
unsigned int niov, struct iovec *iov, unsigned int iovoffset,
unsigned int nob)
{
@@ -453,27 +453,27 @@ lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffs
if (nob == 0)
return;
- LASSERT (!in_interrupt ());
+ LASSERT(!in_interrupt());
- LASSERT (nkiov > 0);
+ LASSERT(nkiov > 0);
while (kiovoffset >= kiov->kiov_len) {
kiovoffset -= kiov->kiov_len;
kiov++;
nkiov--;
- LASSERT (nkiov > 0);
+ LASSERT(nkiov > 0);
}
- LASSERT (niov > 0);
+ LASSERT(niov > 0);
while (iovoffset >= iov->iov_len) {
iovoffset -= iov->iov_len;
iov++;
niov--;
- LASSERT (niov > 0);
+ LASSERT(niov > 0);
}
do {
- LASSERT (nkiov > 0);
- LASSERT (niov > 0);
+ LASSERT(nkiov > 0);
+ LASSERT(niov > 0);
this_nob = MIN(kiov->kiov_len - kiovoffset,
iov->iov_len - iovoffset);
this_nob = MIN(this_nob, nob);
@@ -482,7 +482,7 @@ lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffs
addr = ((char *)kmap(kiov->kiov_page)) +
kiov->kiov_offset + kiovoffset;
- memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
+ memcpy(addr, (char *)iov->iov_base + iovoffset, this_nob);
nob -= this_nob;
if (kiov->kiov_len > kiovoffset + this_nob) {
@@ -511,7 +511,7 @@ lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffs
EXPORT_SYMBOL(lnet_copy_iov2kiov);
int
-lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst,
+lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
int src_niov, lnet_kiov_t *src,
unsigned int offset, unsigned int len)
{
@@ -524,18 +524,18 @@ lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst,
if (len == 0) /* no data => */
return (0); /* no frags */
- LASSERT (src_niov > 0);
+ LASSERT(src_niov > 0);
while (offset >= src->kiov_len) { /* skip initial frags */
offset -= src->kiov_len;
src_niov--;
src++;
- LASSERT (src_niov > 0);
+ LASSERT(src_niov > 0);
}
niov = 1;
for (;;) {
- LASSERT (src_niov > 0);
- LASSERT ((int)niov <= dst_niov);
+ LASSERT(src_niov > 0);
+ LASSERT((int)niov <= dst_niov);
frag_len = src->kiov_len - offset;
dst->kiov_page = src->kiov_page;
@@ -543,12 +543,12 @@ lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst,
if (len <= frag_len) {
dst->kiov_len = len;
- LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
+ LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
return (niov);
}
dst->kiov_len = frag_len;
- LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
+ LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
len -= frag_len;
dst++;
@@ -569,8 +569,8 @@ lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
lnet_kiov_t *kiov = NULL;
int rc;
- LASSERT (!in_interrupt ());
- LASSERT (mlen == 0 || msg != NULL);
+ LASSERT(!in_interrupt());
+ LASSERT(mlen == 0 || msg != NULL);
if (msg != NULL) {
LASSERT(msg->msg_receiving);
@@ -587,8 +587,8 @@ lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
iov = msg->msg_iov;
kiov = msg->msg_kiov;
- LASSERT (niov > 0);
- LASSERT ((iov == NULL) != (kiov == NULL));
+ LASSERT(niov > 0);
+ LASSERT((iov == NULL) != (kiov == NULL));
}
}
@@ -603,12 +603,12 @@ lnet_setpayloadbuffer(lnet_msg_t *msg)
{
lnet_libmd_t *md = msg->msg_md;
- LASSERT (msg->msg_len > 0);
- LASSERT (!msg->msg_routing);
- LASSERT (md != NULL);
- LASSERT (msg->msg_niov == 0);
- LASSERT (msg->msg_iov == NULL);
- LASSERT (msg->msg_kiov == NULL);
+ LASSERT(msg->msg_len > 0);
+ LASSERT(!msg->msg_routing);
+ LASSERT(md != NULL);
+ LASSERT(msg->msg_niov == 0);
+ LASSERT(msg->msg_iov == NULL);
+ LASSERT(msg->msg_kiov == NULL);
msg->msg_niov = md->md_niov;
if ((md->md_options & LNET_MD_KIOV) != 0)
@@ -629,7 +629,7 @@ lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
if (len != 0)
lnet_setpayloadbuffer(msg);
- memset (&msg->msg_hdr, 0, sizeof (msg->msg_hdr));
+ memset(&msg->msg_hdr, 0, sizeof(msg->msg_hdr));
msg->msg_hdr.type = cpu_to_le32(type);
msg->msg_hdr.dest_nid = cpu_to_le64(target.nid);
msg->msg_hdr.dest_pid = cpu_to_le32(target.pid);
@@ -644,8 +644,8 @@ lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
void *priv = msg->msg_private;
int rc;
- LASSERT (!in_interrupt ());
- LASSERT (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
+ LASSERT(!in_interrupt());
+ LASSERT(LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
(msg->msg_txcredit && msg->msg_peertxcredit));
rc = (ni->ni_lnd->lnd_send)(ni, priv, msg);
@@ -698,12 +698,12 @@ lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)
/* NB: always called with lnet_net_lock held */
static inline int
-lnet_peer_is_alive (lnet_peer_t *lp, cfs_time_t now)
+lnet_peer_is_alive(lnet_peer_t *lp, cfs_time_t now)
{
int alive;
cfs_time_t deadline;
- LASSERT (lnet_peer_aliveness_enabled(lp));
+ LASSERT(lnet_peer_aliveness_enabled(lp));
/* Trust lnet_notify() if it has more recent aliveness news, but
* ignore the initial assumed death (see lnet_peers_start_down()).
@@ -731,7 +731,7 @@ lnet_peer_is_alive (lnet_peer_t *lp, cfs_time_t now)
/* NB: returns 1 when alive, 0 when dead, negative when error;
* may drop the lnet_net_lock */
int
-lnet_peer_alive_locked (lnet_peer_t *lp)
+lnet_peer_alive_locked(lnet_peer_t *lp)
{
cfs_time_t now = cfs_time_current();
@@ -809,7 +809,7 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send)
}
if (!msg->msg_peertxcredit) {
- LASSERT ((lp->lp_txcredits < 0) ==
+ LASSERT((lp->lp_txcredits < 0) ==
!list_empty(&lp->lp_txq));
msg->msg_peertxcredit = 1;
@@ -873,7 +873,7 @@ lnet_msg2bufpool(lnet_msg_t *msg)
}
int
-lnet_post_routed_recv_locked (lnet_msg_t *msg, int do_recv)
+lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
{
/* lnet_parse is going to lnet_net_unlock immediately after this, so it
* sets do_recv FALSE and I don't do the unlock/send/lock bit. I
@@ -882,18 +882,18 @@ lnet_post_routed_recv_locked (lnet_msg_t *msg, int do_recv)
lnet_rtrbufpool_t *rbp;
lnet_rtrbuf_t *rb;
- LASSERT (msg->msg_iov == NULL);
- LASSERT (msg->msg_kiov == NULL);
- LASSERT (msg->msg_niov == 0);
- LASSERT (msg->msg_routing);
- LASSERT (msg->msg_receiving);
- LASSERT (!msg->msg_sending);
+ LASSERT(msg->msg_iov == NULL);
+ LASSERT(msg->msg_kiov == NULL);
+ LASSERT(msg->msg_niov == 0);
+ LASSERT(msg->msg_routing);
+ LASSERT(msg->msg_receiving);
+ LASSERT(!msg->msg_sending);
/* non-lnet_parse callers only receive delayed messages */
LASSERT(!do_recv || msg->msg_rx_delayed);
if (!msg->msg_peerrtrcredit) {
- LASSERT ((lp->lp_rtrcredits < 0) ==
+ LASSERT((lp->lp_rtrcredits < 0) ==
!list_empty(&lp->lp_rtrq));
msg->msg_peerrtrcredit = 1;
@@ -913,7 +913,7 @@ lnet_post_routed_recv_locked (lnet_msg_t *msg, int do_recv)
rbp = lnet_msg2bufpool(msg);
if (!msg->msg_rtrcredit) {
- LASSERT ((rbp->rbp_credits < 0) ==
+ LASSERT((rbp->rbp_credits < 0) ==
!list_empty(&rbp->rbp_msgs));
msg->msg_rtrcredit = 1;
@@ -930,7 +930,7 @@ lnet_post_routed_recv_locked (lnet_msg_t *msg, int do_recv)
}
}
- LASSERT (!list_empty(&rbp->rbp_bufs));
+ LASSERT(!list_empty(&rbp->rbp_bufs));
rb = list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
list_del(&rb->rb_list);
@@ -985,7 +985,7 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg)
!list_empty(&txpeer->lp_txq));
txpeer->lp_txqnob -= msg->msg_len + sizeof(lnet_hdr_t);
- LASSERT (txpeer->lp_txqnob >= 0);
+ LASSERT(txpeer->lp_txqnob >= 0);
txpeer->lp_txcredits++;
if (txpeer->lp_txcredits <= 0) {
@@ -1020,11 +1020,11 @@ lnet_return_rx_credits_locked(lnet_msg_t *msg)
/* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
* there until it gets one allocated, or aborts the wait
* itself */
- LASSERT (msg->msg_kiov != NULL);
+ LASSERT(msg->msg_kiov != NULL);
rb = list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
rbp = rb->rb_pool;
- LASSERT (rbp == lnet_msg2bufpool(msg));
+ LASSERT(rbp == lnet_msg2bufpool(msg));
msg->msg_kiov = NULL;
msg->msg_rtrcredit = 0;
@@ -1172,10 +1172,10 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
* but we might want to use pre-determined router for ACK/REPLY
* in the future */
/* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
- LASSERT (msg->msg_txpeer == NULL);
- LASSERT (!msg->msg_sending);
- LASSERT (!msg->msg_target_is_router);
- LASSERT (!msg->msg_receiving);
+ LASSERT(msg->msg_txpeer == NULL);
+ LASSERT(!msg->msg_sending);
+ LASSERT(!msg->msg_target_is_router);
+ LASSERT(!msg->msg_receiving);
msg->msg_sending = 1;
@@ -1200,7 +1200,7 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
libcfs_nid2str(src_nid));
return -EINVAL;
}
- LASSERT (!msg->msg_routing);
+ LASSERT(!msg->msg_routing);
}
/* Is this for someone on a local network? */
@@ -1249,7 +1249,7 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
/* ENOMEM or shutting down */
return rc;
}
- LASSERT (lp->lp_ni == src_ni);
+ LASSERT(lp->lp_ni == src_ni);
} else {
/* sending to a remote network */
lp = lnet_find_route_locked(src_ni, dst_nid, rtr_nid);
@@ -1290,7 +1290,7 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
src_ni = lp->lp_ni;
src_nid = src_ni->ni_nid;
} else {
- LASSERT (src_ni == lp->lp_ni);
+ LASSERT(src_ni == lp->lp_ni);
lnet_ni_decref_locked(src_ni, cpt);
}
@@ -1311,9 +1311,9 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
/* 'lp' is our best choice of peer */
- LASSERT (!msg->msg_peertxcredit);
- LASSERT (!msg->msg_txcredit);
- LASSERT (msg->msg_txpeer == NULL);
+ LASSERT(!msg->msg_peertxcredit);
+ LASSERT(!msg->msg_txcredit);
+ LASSERT(msg->msg_txpeer == NULL);
msg->msg_txpeer = lp; /* msg takes my ref on lp */
@@ -1509,7 +1509,7 @@ lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)
return ENOENT; /* +ve: OK but no match */
}
- LASSERT (md->md_offset == 0);
+ LASSERT(md->md_offset == 0);
rlength = hdr->payload_length;
mlength = MIN(rlength, (int)md->md_length);
@@ -1614,7 +1614,7 @@ lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg)
}
char *
-lnet_msgtyp2str (int type)
+lnet_msgtyp2str(int type)
{
switch (type) {
case LNET_MSG_ACK:
@@ -1638,7 +1638,7 @@ lnet_print_hdr(lnet_hdr_t * hdr)
{
lnet_process_id_t src = {0};
lnet_process_id_t dst = {0};
- char *type_str = lnet_msgtyp2str (hdr->type);
+ char *type_str = lnet_msgtyp2str(hdr->type);
src.nid = hdr->src_nid;
src.pid = hdr->src_pid;
@@ -1709,7 +1709,7 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
__u32 payload_length;
__u32 type;
- LASSERT (!in_interrupt ());
+ LASSERT(!in_interrupt());
type = le32_to_cpu(hdr->type);
src_nid = le64_to_cpu(hdr->src_nid);
@@ -1772,7 +1772,7 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
if (!for_me) {
if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
/* should have gone direct */
- CERROR ("%s, src %s: Bad dest nid %s "
+ CERROR("%s, src %s: Bad dest nid %s "
"(should have been sent direct)\n",
libcfs_nid2str(from_nid),
libcfs_nid2str(src_nid),
@@ -1783,7 +1783,7 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
if (lnet_islocalnid(dest_nid)) {
/* dest is another local NI; sender should have used
* this node's NID on its own network */
- CERROR ("%s, src %s: Bad dest nid %s "
+ CERROR("%s, src %s: Bad dest nid %s "
"(it's my nid but on a different network)\n",
libcfs_nid2str(from_nid),
libcfs_nid2str(src_nid),
@@ -1792,7 +1792,7 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
}
if (rdma_req && type == LNET_MSG_GET) {
- CERROR ("%s, src %s: Bad optimized GET for %s "
+ CERROR("%s, src %s: Bad optimized GET for %s "
"(final destination must be me)\n",
libcfs_nid2str(from_nid),
libcfs_nid2str(src_nid),
@@ -1801,7 +1801,7 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
}
if (!the_lnet.ln_routing) {
- CERROR ("%s, src %s: Dropping message for %s "
+ CERROR("%s, src %s: Dropping message for %s "
"(routing not enabled)\n",
libcfs_nid2str(from_nid),
libcfs_nid2str(src_nid),
@@ -1813,8 +1813,8 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
/* Message looks OK; we're not going to return an error, so we MUST
* call back lnd_recv() come what may... */
- if (!list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
- fail_peer (src_nid, 0)) /* shall we now? */
+ if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
+ fail_peer(src_nid, 0)) /* shall we now? */
{
CERROR("%s, src %s: Dropping %s to simulate failure\n",
libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
@@ -1906,7 +1906,7 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
if (rc == 0)
return 0;
- LASSERT (rc == ENOENT);
+ LASSERT(rc == ENOENT);
free_drop:
LASSERT(msg->msg_md == NULL);
@@ -2047,11 +2047,11 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
int cpt;
int rc;
- LASSERT (the_lnet.ln_init);
- LASSERT (the_lnet.ln_refcount > 0);
+ LASSERT(the_lnet.ln_init);
+ LASSERT(the_lnet.ln_refcount > 0);
- if (!list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
- fail_peer (target.nid, 1)) /* shall we now? */
+ if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
+ fail_peer(target.nid, 1)) /* shall we now? */
{
CERROR("Dropping PUT to %s: simulated failure\n",
libcfs_id2str(target));
@@ -2113,9 +2113,9 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
rc = lnet_send(self, msg, LNET_NID_ANY);
if (rc != 0) {
- CNETERR( "Error sending PUT to %s: %d\n",
+ CNETERR("Error sending PUT to %s: %d\n",
libcfs_id2str(target), rc);
- lnet_finalize (NULL, msg, rc);
+ lnet_finalize(NULL, msg, rc);
}
/* completion will be signalled by an event */
@@ -2124,7 +2124,7 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
EXPORT_SYMBOL(LNetPut);
lnet_msg_t *
-lnet_create_reply_msg (lnet_ni_t *ni, lnet_msg_t *getmsg)
+lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg)
{
/* The LND can DMA direct to the GET md (i.e. no REPLY msg). This
* returns a msg for the LND to pass to lnet_finalize() when the sink
@@ -2144,16 +2144,16 @@ lnet_create_reply_msg (lnet_ni_t *ni, lnet_msg_t *getmsg)
cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
lnet_res_lock(cpt);
- LASSERT (getmd->md_refcount > 0);
+ LASSERT(getmd->md_refcount > 0);
if (msg == NULL) {
- CERROR ("%s: Dropping REPLY from %s: can't allocate msg\n",
+ CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
goto drop;
}
if (getmd->md_threshold == 0) {
- CERROR ("%s: Dropping REPLY from %s for inactive MD %p\n",
+ CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
getmd);
lnet_res_unlock(cpt);
@@ -2205,13 +2205,13 @@ lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len)
{
/* Set the REPLY length, now the RDMA that elides the REPLY message has
* completed and I know it. */
- LASSERT (reply != NULL);
- LASSERT (reply->msg_type == LNET_MSG_GET);
- LASSERT (reply->msg_ev.type == LNET_EVENT_REPLY);
+ LASSERT(reply != NULL);
+ LASSERT(reply->msg_type == LNET_MSG_GET);
+ LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
/* NB I trusted my peer to RDMA. If she tells me she's written beyond
* the end of my buffer, I might as well be dead. */
- LASSERT (len <= reply->msg_ev.mlength);
+ LASSERT(len <= reply->msg_ev.mlength);
reply->msg_ev.mlength = len;
}
@@ -2247,11 +2247,11 @@ LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
int cpt;
int rc;
- LASSERT (the_lnet.ln_init);
- LASSERT (the_lnet.ln_refcount > 0);
+ LASSERT(the_lnet.ln_init);
+ LASSERT(the_lnet.ln_refcount > 0);
- if (!list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
- fail_peer (target.nid, 1)) /* shall we now? */
+ if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
+ fail_peer(target.nid, 1)) /* shall we now? */
{
CERROR("Dropping GET to %s: simulated failure\n",
libcfs_id2str(target));
@@ -2307,9 +2307,9 @@ LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
rc = lnet_send(self, msg, LNET_NID_ANY);
if (rc < 0) {
- CNETERR( "Error sending GET to %s: %d\n",
+ CNETERR("Error sending GET to %s: %d\n",
libcfs_id2str(target), rc);
- lnet_finalize (NULL, msg, rc);
+ lnet_finalize(NULL, msg, rc);
}
/* completion will be signalled by an event */
@@ -2348,12 +2348,12 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
* keep order 0 free for 0@lo and order 1 free for a local NID
* match */
- LASSERT (the_lnet.ln_init);
- LASSERT (the_lnet.ln_refcount > 0);
+ LASSERT(the_lnet.ln_init);
+ LASSERT(the_lnet.ln_refcount > 0);
cpt = lnet_net_lock_current();
- list_for_each (e, &the_lnet.ln_nis) {
+ list_for_each(e, &the_lnet.ln_nis) {
ni = list_entry(e, lnet_ni_t, ni_list);
if (ni->ni_nid == dstnid) {
@@ -2390,7 +2390,7 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
lnet_route_t *route;
lnet_route_t *shortest = NULL;
- LASSERT (!list_empty(&rnet->lrn_routes));
+ LASSERT(!list_empty(&rnet->lrn_routes));
list_for_each_entry(route, &rnet->lrn_routes,
lr_list) {
@@ -2399,7 +2399,7 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
shortest = route;
}
- LASSERT (shortest != NULL);
+ LASSERT(shortest != NULL);
hops = shortest->lr_hops;
if (srcnidp != NULL)
*srcnidp = shortest->lr_gateway->lp_ni->ni_nid;