This article focuses on the flow of sip_transport in pjsip during sending and receiving; other aspects are not covered for now.
tsx_send_msg
This function is defined in pjsip/src/pjsip/sip_transaction.c; its content is as follows:
/*
* Send message to the transport.
*/
static pj_status_t tsx_send_msg( pjsip_transaction *tsx,
pjsip_tx_data *tdata)
{
pj_status_t status = PJ_SUCCESS;
PJ_ASSERT_RETURN(tsx && tdata, PJ_EINVAL);
/* Send later if transport is still pending. */
if (tsx->transport_flag & TSX_HAS_PENDING_TRANSPORT) {
tsx->transport_flag |= TSX_HAS_PENDING_SEND;
return PJ_SUCCESS;
}
/* Skip send if previous tdata transmission is pending (see #1665). */
if (tdata->is_pending) {
PJ_LOG(2,(THIS_FILE, "Unable to send %s: message is pending",
pjsip_tx_data_get_info(tdata)));
return PJ_SUCCESS;
}
/* If we have the transport, send the message using that transport.
* Otherwise perform full transport resolution.
*/
if (tsx->transport) {
/* Increment group lock while waiting for send operation to complete,
* to prevent us from being destroyed prematurely. See
* https://github.com/pjsip/pjproject/issues/1646
*/
pj_grp_lock_add_ref(tsx->grp_lock);
tsx->transport_flag |= TSX_HAS_PENDING_TRANSPORT;
status = pjsip_transport_send( tsx->transport, tdata, &tsx->addr,
tsx->addr_len, tsx,
&transport_callback);
if (status == PJ_EPENDING)
status = PJ_SUCCESS;
else {
/* Operation completes immediately */
tsx->transport_flag &= ~(TSX_HAS_PENDING_TRANSPORT);
pj_grp_lock_dec_ref(tsx->grp_lock);
}
if (status != PJ_SUCCESS) {
PJ_PERROR(2,(tsx->obj_name, status,
"Error sending %s",
pjsip_tx_data_get_info(tdata)));
/* On error, release transport to force using full transport
* resolution procedure.
*/
tsx_update_transport(tsx, NULL);
tsx->addr_len = 0;
tsx->res_addr.transport = NULL;
tsx->res_addr.addr_len = 0;
} else {
return PJ_SUCCESS;
}
}
/* We are here because we don't have transport, or we failed to send
* the message using existing transport. If we haven't resolved the
* server before, then begin the long process of resolving the server
* and send the message with possibly new server.
*/
pj_assert(status != PJ_SUCCESS || tsx->transport == NULL);
/* If we have resolved the server, we treat the error as permanent error.
* Terminate transaction with transport error failure.
*/
if (tsx->transport_flag & TSX_HAS_RESOLVED_SERVER) {
char errmsg[PJ_ERR_MSG_SIZE];
pj_str_t err;
if (status == PJ_SUCCESS) {
pj_assert(!"Unexpected status!");
status = PJ_EUNKNOWN;
}
/* We have resolved the server!.
* Treat this as permanent transport error.
*/
err = pj_strerror(status, errmsg, sizeof(errmsg));
PJ_LOG(2,(tsx->obj_name,
"Transport error, terminating transaction. "
"Err=%d (%s)",
status, errmsg));
tsx_set_status_code(tsx, PJSIP_SC_TSX_TRANSPORT_ERROR, &err);
tsx_set_state( tsx, PJSIP_TSX_STATE_TERMINATED,
PJSIP_EVENT_TRANSPORT_ERROR, NULL, 0 );
return status;
}
/* Must add reference counter because the send request functions
* decrement the reference counter.
*/
pjsip_tx_data_add_ref(tdata);
/* Also attach ourselves to the transmit data so that we'll be able
* to unregister ourselves from the send notification of this
* transmit data.
*/
tdata->mod_data[mod_tsx_layer.mod.id] = tsx;
tsx->pending_tx = tdata;
/* Increment group lock while waiting for send operation to complete,
* to prevent us from being destroyed prematurely (ticket #1859).
*/
pj_grp_lock_add_ref(tsx->grp_lock);
/* Begin resolving destination etc to send the message. */
if (tdata->msg->type == PJSIP_REQUEST_MSG) {
tsx->transport_flag |= TSX_HAS_PENDING_TRANSPORT;
status = pjsip_endpt_send_request_stateless(tsx->endpt, tdata, tsx,
&send_msg_callback);
if (status == PJ_EPENDING)
status = PJ_SUCCESS;
if (status != PJ_SUCCESS) {
tsx->transport_flag &= ~(TSX_HAS_PENDING_TRANSPORT);
pj_grp_lock_dec_ref(tsx->grp_lock);
pjsip_tx_data_dec_ref(tdata);
tdata->mod_data[mod_tsx_layer.mod.id] = NULL;
tsx->pending_tx = NULL;
}
/* Check if transaction is terminated. */
if (status==PJ_SUCCESS && tsx->state == PJSIP_TSX_STATE_TERMINATED)
status = tsx->transport_err;
} else {
tsx->transport_flag |= TSX_HAS_PENDING_TRANSPORT;
status = pjsip_endpt_send_response( tsx->endpt, &tsx->res_addr,
tdata, tsx,
&send_msg_callback);
if (status == PJ_EPENDING)
status = PJ_SUCCESS;
if (status != PJ_SUCCESS) {
tsx->transport_flag &= ~(TSX_HAS_PENDING_TRANSPORT);
pj_grp_lock_dec_ref(tsx->grp_lock);
pjsip_tx_data_dec_ref(tdata);
tdata->mod_data[mod_tsx_layer.mod.id] = NULL;
tsx->pending_tx = NULL;
}
/* Check if transaction is terminated. */
if (status==PJ_SUCCESS && tsx->state == PJSIP_TSX_STATE_TERMINATED)
status = tsx->transport_err;
}
return status;
}
A transaction can send data in three ways, plus a completion callback (a hedged usage sketch follows this list):
- pjsip_transport_send: if the transaction already owns a sip_transport, the message is sent directly through it.
- pjsip_endpt_send_request_stateless: used when the transaction has no sip_transport yet and the message is a request; the endpoint resolves the destination and sends the request.
- pjsip_endpt_send_response: used when the message is a response and the transaction has no sip_transport; the response is sent to the address saved in tsx->res_addr.
- send_msg_callback: the callback invoked when the stateless send completes; it lets the transaction obtain the transport that was actually used and keep it for subsequent transmissions.
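To put tsx_send_msg in context, here is a hedged usage sketch (not code from pjsip or from this article; the helper name and the reuse of the target URI for From/To/Contact are purely illustrative) showing how a module typically reaches this path: build a request, wrap it in a UAC transaction, and send it with pjsip_tsx_send_msg(), which internally ends up in the static tsx_send_msg() above.
/* Hedged sketch: sending an OPTIONS request through a UAC transaction. */
#include <pjlib.h>
#include <pjsip.h>
static pj_status_t send_options_via_uac_tsx(pjsip_endpoint *endpt,
                                            pjsip_module *tsx_user,
                                            const pj_str_t *target)
{
    pjsip_tx_data *tdata;
    pjsip_transaction *tsx;
    pjsip_method options;
    pj_status_t status;
    pjsip_method_set(&options, PJSIP_OPTIONS_METHOD);
    /* Build the outgoing request; From/To/Contact simply reuse the
     * target URI here to keep the sketch short. */
    status = pjsip_endpt_create_request(endpt, &options, target,
                                        target, target, target,
                                        NULL, -1, NULL, &tdata);
    if (status != PJ_SUCCESS)
        return status;
    /* Create a UAC transaction and hand the request to it.
     * pjsip_tsx_send_msg() leads to the static tsx_send_msg() above;
     * since the new transaction has no transport yet, the
     * pjsip_endpt_send_request_stateless() branch is taken, and
     * send_msg_callback() later stores the resolved transport in tsx. */
    status = pjsip_tsx_create_uac(tsx_user, tdata, &tsx);
    if (status != PJ_SUCCESS) {
        pjsip_tx_data_dec_ref(tdata);
        return status;
    }
    return pjsip_tsx_send_msg(tsx, tdata);
}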
pjsip_transport_send
This function is defined in pjsip/src/pjsip/sip_transport.c; its content is as follows:
/*
* Send a SIP message using the specified transport.
*/
PJ_DEF(pj_status_t) pjsip_transport_send( pjsip_transport *tr,
pjsip_tx_data *tdata,
const pj_sockaddr_t *addr,
int addr_len,
void *token,
pjsip_tp_send_callback cb)
{
pj_status_t status;
PJ_ASSERT_RETURN(tr && tdata && addr, PJ_EINVAL);
/* Is it currently being sent? */
if (tdata->is_pending) {
pj_assert(!"Invalid operation step!");
PJ_LOG(2,(THIS_FILE, "Unable to send %s: message is pending",
pjsip_tx_data_get_info(tdata)));
return PJSIP_EPENDINGTX;
}
/* Add reference to prevent deletion, and to cancel idle timer if
* it's running.
*/
pjsip_transport_add_ref(tr);
/* Fill in tp_info. */
tdata->tp_info.transport = tr;
pj_memcpy(&tdata->tp_info.dst_addr, addr, addr_len);
tdata->tp_info.dst_addr_len = addr_len;
pj_inet_ntop(((pj_sockaddr*)addr)->addr.sa_family,
pj_sockaddr_get_addr(addr),
tdata->tp_info.dst_name,
sizeof(tdata->tp_info.dst_name));
tdata->tp_info.dst_port = pj_sockaddr_get_port(addr);
/* Distribute to modules.
* When the message reach mod_msg_print, the contents of the message will
* be "printed" to contiguous buffer.
*/
if (tr->tpmgr->on_tx_msg) {
status = (*tr->tpmgr->on_tx_msg)(tr->endpt, tdata);
if (status != PJ_SUCCESS) {
pjsip_transport_dec_ref(tr);
return status;
}
}
/* Save callback data. */
tdata->token = token;
tdata->cb = cb;
/* Add reference counter. */
pjsip_tx_data_add_ref(tdata);
/* Mark as pending. */
tdata->is_pending = 1;
/* Send to transport. */
status = (*tr->send_msg)(tr, tdata, addr, addr_len, (void*)tdata,
&transport_send_callback);
if (status != PJ_EPENDING) {
tdata->is_pending = 0;
pjsip_tx_data_dec_ref(tdata);
}
pjsip_transport_dec_ref(tr);
return status;
}
- tr->send_msg: dispatches to the send_msg function implemented by the concrete transport (UDP/TCP/TLS). The registration of send_msg was covered in 《pjsip创建sip_transport的过程》 (the article on how pjsip creates a sip_transport); a hedged sketch of that wiring pattern is shown below. Next we jump into the UDP implementation, udp_send_msg, to see how the data is actually sent.
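How does (*tr->send_msg)(...) end up in udp_send_msg? Each transport implementation stores its own function in the send_msg field of pjsip_transport when the transport is created. The following is a hedged sketch of that pattern with made-up names (my_transport, my_send_msg); sip_transport_udp.c does the equivalent with udp_send_msg.
/* Hedged sketch of the send_msg "virtual method" wiring. */
#include <pjsip.h>
struct my_transport
{
    pjsip_transport base;        /* base must come first */
    /* ... implementation-specific state (socket, buffers, ...) ... */
};
static pj_status_t my_send_msg(pjsip_transport *transport,
                               pjsip_tx_data *tdata,
                               const pj_sockaddr_t *rem_addr,
                               int addr_len,
                               void *token,
                               pjsip_transport_callback callback)
{
    /* A real transport would push tdata->buf to its socket here and,
     * for asynchronous sends, call (*callback)(transport, token, size)
     * once the data has actually been written. */
    PJ_UNUSED_ARG(transport); PJ_UNUSED_ARG(tdata);
    PJ_UNUSED_ARG(rem_addr);  PJ_UNUSED_ARG(addr_len);
    PJ_UNUSED_ARG(token);     PJ_UNUSED_ARG(callback);
    return PJ_EPENDING;       /* completion reported later */
}
static void my_transport_set_vtable(struct my_transport *tp)
{
    /* This is the pointer that pjsip_transport_send() dispatches through
     * as (*tr->send_msg)(tr, tdata, addr, addr_len, token, cb). */
    tp->base.send_msg = &my_send_msg;
}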
udp_send_msg
This function is defined in pjsip/src/pjsip/sip_transport_udp.c; its content is as follows:
/*
* udp_send_msg()
*
* This function is called by transport manager (by transport->send_msg())
* to send outgoing message.
*/
static pj_status_t udp_send_msg( pjsip_transport *transport,
pjsip_tx_data *tdata,
const pj_sockaddr_t *rem_addr,
int addr_len,
void *token,
pjsip_transport_callback callback)
{
struct udp_transport *tp = (struct udp_transport*)transport;
pj_ssize_t size;
pj_status_t status;
PJ_ASSERT_RETURN(transport && tdata, PJ_EINVAL);
PJ_ASSERT_RETURN(tdata->op_key.tdata == NULL, PJSIP_EPENDINGTX);
/* Return error if transport is paused */
if (tp->is_paused)
return PJSIP_ETPNOTAVAIL;
/* Init op key. */
tdata->op_key.tdata = tdata;
tdata->op_key.token = token;
tdata->op_key.callback = callback;
/* Send to ioqueue! */
size = tdata->buf.cur - tdata->buf.start;
status = pj_ioqueue_sendto(tp->key, (pj_ioqueue_op_key_t*)&tdata->op_key,
tdata->buf.start, &size, 0,
rem_addr, addr_len);
if (status != PJ_EPENDING) {
#if 0
/* Auto restart is disabled, see #2881 */
if (status == PJ_ESOCKETSTOP) {
/* Try to recover by restarting the transport. */
PJ_LOG(4,(tp->base.obj_name, "Restarting SIP UDP transport"));
status = pjsip_udp_transport_restart2(
&tp->base,
PJSIP_UDP_TRANSPORT_DESTROY_SOCKET,
PJ_INVALID_SOCKET,
&tp->base.local_addr,
&tp->base.local_name);
if (status != PJ_SUCCESS) {
PJ_PERROR(1,(THIS_FILE, status,
"Error restarting SIP UDP transport"));
}
}
#endif
tdata->op_key.tdata = NULL;
}
return status;
}
- tdata->op_key.tdata: here the op_key is initialized; it carries the parameters (tdata, token, callback) that the ioqueue needs to perform the send and to report its completion (see the sketch below).
- pj_ioqueue_sendto: the function that finally pushes the data out.
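To see why the op_key stores tdata, token, and callback, it helps to glance at the other end of the asynchronous send. Below is a hedged sketch of what a UDP-style write-completion handler does with the op_key; it approximates udp_on_write_complete() in sip_transport_udp.c (pause/restart handling omitted, and it assumes, as that file does, that the transport struct embeds pjsip_transport as its first member and is registered as the ioqueue key's user data).
#include <pjlib.h>
#include <pjsip.h>
static void on_write_complete_sketch(pj_ioqueue_key_t *key,
                                     pj_ioqueue_op_key_t *op_key,
                                     pj_ssize_t bytes_sent)
{
    /* The transport registered itself as the key's user data, so it can
     * be recovered here (valid because pjsip_transport is assumed to be
     * the first member of the transport struct). */
    pjsip_transport *tr = (pjsip_transport*) pj_ioqueue_get_user_data(key);
    /* op_key is the pjsip_tx_data_op_key embedded in the tdata, so the
     * token and callback saved in udp_send_msg() are available again. */
    pjsip_tx_data_op_key *tdata_op_key = (pjsip_tx_data_op_key*) op_key;
    /* Mark the op_key as free, then report the result upwards. For
     * pjsip_transport_send() the callback passed here is
     * transport_send_callback, which then forwards the result to
     * tdata->cb (here: the transaction's transport_callback). */
    tdata_op_key->tdata = NULL;
    if (tdata_op_key->callback) {
        (*tdata_op_key->callback)(tr, tdata_op_key->token, bytes_sent);
    }
}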
pj_ioqueue_sendto
This function is defined in pjlib/src/pj/ioqueue_common_abs.c; here we only look at the implementation relevant to Android (ioqueue_common_abs.c is the common code shared by the select/epoll ioqueue backends). Its content is as follows:
/*
* pj_ioqueue_sendto()
*
* Start asynchronous write() to the descriptor.
*/
PJ_DEF(pj_status_t) pj_ioqueue_sendto( pj_ioqueue_key_t *key,
pj_ioqueue_op_key_t *op_key,
const void *data,
pj_ssize_t *length,
pj_uint32_t flags,
const pj_sockaddr_t *addr,
int addrlen)
{
struct write_operation *write_op;
unsigned retry;
pj_bool_t restart_retry = PJ_FALSE;
pj_status_t status;
pj_ssize_t sent;
PJ_ASSERT_RETURN(key && op_key && data && length, PJ_EINVAL);
PJ_CHECK_STACK();
#if defined(PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT) && \
PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT!=0
retry_on_restart:
#else
PJ_UNUSED_ARG(restart_retry);
#endif
/* Check if key is closing. */
if (IS_CLOSING(key))
return PJ_ECANCELLED;
/* We can not use PJ_IOQUEUE_ALWAYS_ASYNC for socket write */
flags &= ~(PJ_IOQUEUE_ALWAYS_ASYNC);
/* Fast track:
* Try to send data immediately, only if there's no pending write!
* Note:
* We are speculating that the list is empty here without properly
* acquiring ioqueue's mutex first. This is intentional, to maximize
* performance via parallelism.
*
* This should be safe, because:
* - by convention, we require caller to make sure that the
* key is not unregistered while other threads are invoking
* an operation on the same key.
* - pj_list_empty() is safe to be invoked by multiple threads,
* even when other threads are modifying the list.
*/
if (pj_list_empty(&key->write_list)) {
/*
* See if data can be sent immediately.
*/
sent = *length;
status = pj_sock_sendto(key->fd, data, &sent, flags, addr, addrlen);
if (status == PJ_SUCCESS) {
/* Success! */
*length = sent;
return PJ_SUCCESS;
} else {
/* If error is not EWOULDBLOCK (or EAGAIN on Linux), report
* the error to caller.
*/
if (status != PJ_STATUS_FROM_OS(PJ_BLOCKING_ERROR_VAL)) {
#if defined(PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT) && \
PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT!=0
/* Special treatment for dead UDP sockets here, see ticket #1107 */
if (status == PJ_STATUS_FROM_OS(EPIPE) && !IS_CLOSING(key) &&
key->fd_type == pj_SOCK_DGRAM())
{
if (!restart_retry) {
PJ_PERROR(4, (THIS_FILE, status,
"Send error for socket %ld, retrying",
key->fd));
status = replace_udp_sock(key);
if (status == PJ_SUCCESS) {
restart_retry = PJ_TRUE;
goto retry_on_restart;
}
}
status = PJ_ESOCKETSTOP;
}
#endif
return status;
}
}
}
/*
* Check that address storage can hold the address parameter.
*/
PJ_ASSERT_RETURN(addrlen <= (int)sizeof(pj_sockaddr_in), PJ_EBUG);
/*
* Schedule asynchronous send.
*/
write_op = (struct write_operation*)op_key;
/* Spin if write_op has pending operation */
for (retry=0; write_op->op != 0 && retry<PENDING_RETRY; ++retry)
pj_thread_sleep(0);
/* Last chance */
if (write_op->op) {
/* Unable to send packet because there is already pending write on the
* write_op. We could not put the operation into the write_op
* because write_op already contains a pending operation! And
* we could not send the packet directly with sendto() either,
* because that will break the order of the packet. So we can
* only return error here.
*
* This could happen for example in multithreads program,
* where polling is done by one thread, while other threads are doing
* the sending only. If the polling thread runs on lower priority
* than the sending thread, then it's possible that the pending
* write flag is not cleared in-time because clearing is only done
* during polling.
*
* Aplication should specify multiple write operation keys on
* situation like this.
*/
//pj_assert(!"ioqueue: there is pending operation on this key!");
return PJ_EBUSY;
}
write_op->op = PJ_IOQUEUE_OP_SEND_TO;
write_op->buf = (char*)data;
write_op->size = *length;
write_op->written = 0;
write_op->flags = flags;
pj_memcpy(&write_op->rmt_addr, addr, addrlen);
write_op->rmt_addrlen = addrlen;
pj_ioqueue_lock_key(key);
/* Check again. Handle may have been closed after the previous check
* in multithreaded app. If we add bad handle to the set it will
* corrupt the ioqueue set. See #913
*/
if (IS_CLOSING(key)) {
pj_ioqueue_unlock_key(key);
return PJ_ECANCELLED;
}
pj_list_insert_before(&key->write_list, write_op);
ioqueue_add_to_set(key->ioqueue, key, WRITEABLE_EVENT);
pj_ioqueue_unlock_key(key);
return PJ_EPENDING;
}
This also explains why udp_on_write_complete may not be called at all: when the write list is empty, the data is sent out immediately on the fast path.
- pj_list_insert_before: if the data cannot be sent right away, the write operation is appended to the key's write_list and is sent later when the poll loop finds the socket writable (see the caller-side sketch below).
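In other words, the return value of pj_ioqueue_sendto() tells the caller which path was taken. Here is a small hedged caller-side sketch of that contract (send_or_wait and its arguments are illustrative, not pjsip code):
#include <pjlib.h>
static pj_status_t send_or_wait(pj_ioqueue_key_t *key,
                                pj_ioqueue_op_key_t *op_key,
                                const void *pkt, pj_ssize_t len,
                                const pj_sockaddr_t *addr, int addr_len)
{
    pj_ssize_t size = len;
    pj_status_t status;
    status = pj_ioqueue_sendto(key, op_key, pkt, &size, 0, addr, addr_len);
    if (status == PJ_SUCCESS) {
        /* Fast path: the datagram left the socket immediately and
         * on_write_complete() will NOT be called for this operation. */
        return PJ_SUCCESS;
    }
    if (status == PJ_EPENDING) {
        /* Queued: the write_op sits in key->write_list; the poll loop
         * sends it when the socket becomes writable and then calls
         * on_write_complete(). */
        return PJ_SUCCESS;
    }
    /* Immediate failure, e.g. PJ_EBUSY when op_key still has a pending
     * operation, or PJ_ECANCELLED when the key is closing. */
    return status;
}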
Since we only care about sending data here, we will skip the internals of the ioqueue polling itself for now and focus on how a queued write_op is taken from the list, sent, and finally reported through on_write_complete.
ioqueue_dispatch_write_event
This function is defined in pjlib/src/pj/ioqueue_common_abs.c; its content is as follows:
pj_bool_t ioqueue_dispatch_write_event( pj_ioqueue_t *ioqueue,
pj_ioqueue_key_t *h)
{
pj_status_t rc;
/* Try lock the key. */
rc = pj_ioqueue_trylock_key(h);
if (rc != PJ_SUCCESS) {
return PJ_FALSE;
}
if (IS_CLOSING(h)) {
pj_ioqueue_unlock_key(h);
return PJ_TRUE;
}
#if defined(PJ_HAS_TCP) && PJ_HAS_TCP!=0
if (h->connecting) {
/* Completion of connect() operation */
pj_status_t status;
pj_bool_t has_lock;
/* Clear operation. */
h->connecting = 0;
ioqueue_remove_from_set2(ioqueue, h, WRITEABLE_EVENT|EXCEPTION_EVENT);
#if (defined(PJ_HAS_SO_ERROR) && PJ_HAS_SO_ERROR!=0)
/* from connect(2):
* On Linux, use getsockopt to read the SO_ERROR option at
* level SOL_SOCKET to determine whether connect() completed
* successfully (if SO_ERROR is zero).
*/
{
int value;
int vallen = sizeof(value);
int gs_rc = pj_sock_getsockopt(h->fd, SOL_SOCKET, SO_ERROR,
&value, &vallen);
if (gs_rc != 0) {
/* Argh!! What to do now???
* Just indicate that the socket is connected. The
* application will get error as soon as it tries to use
* the socket to send/receive.
*/
status = PJ_SUCCESS;
} else {
status = PJ_STATUS_FROM_OS(value);
}
}
#elif (defined(PJ_WIN32) && PJ_WIN32!=0) || (defined(PJ_WIN64) && PJ_WIN64!=0)
status = PJ_SUCCESS; /* success */
#else
/* Excellent information in D.J. Bernstein page:
* http://cr.yp.to/docs/connect.html
*
* Seems like the most portable way of detecting connect()
* failure is to call getpeername(). If socket is connected,
* getpeername() will return 0. If the socket is not connected,
* it will return ENOTCONN, and read(fd, &ch, 1) will produce
* the right errno through error slippage. This is a combination
* of suggestions from Douglas C. Schmidt and Ken Keys.
*/
{
struct sockaddr_in addr;
int addrlen = sizeof(addr);
status = pj_sock_getpeername(h->fd, (struct sockaddr*)&addr,
&addrlen);
}
#endif
/* Unlock; from this point we don't need to hold key's mutex
* (unless concurrency is disabled, which in this case we should
* hold the mutex while calling the callback) */
if (h->allow_concurrent) {
/* concurrency may be changed while we're in the callback, so
* save it to a flag.
*/
has_lock = PJ_FALSE;
pj_ioqueue_unlock_key(h);
} else {
has_lock = PJ_TRUE;
}
/* Call callback. */
if (h->cb.on_connect_complete && !IS_CLOSING(h))
(*h->cb.on_connect_complete)(h, status);
/* Unlock if we still hold the lock */
if (has_lock) {
pj_ioqueue_unlock_key(h);
}
/* Done. */
} else
#endif /* PJ_HAS_TCP */
if (key_has_pending_write(h)) {
/* Socket is writable. */
struct write_operation *write_op;
pj_ssize_t sent;
pj_status_t send_rc = PJ_SUCCESS;
/* Get the first in the queue. */
write_op = h->write_list.next;
/* For datagrams, we can remove the write_op from the list
* so that send() can work in parallel.
*/
if (h->fd_type == pj_SOCK_DGRAM()) {
pj_list_erase(write_op);
if (pj_list_empty(&h->write_list))
ioqueue_remove_from_set(ioqueue, h, WRITEABLE_EVENT);
}
/* Send the data.
* Unfortunately we must do this while holding key's mutex, thus
* preventing parallel write on a single key.. :-((
*/
sent = write_op->size - write_op->written;
if (write_op->op == PJ_IOQUEUE_OP_SEND) {
send_rc = pj_sock_send(h->fd, write_op->buf+write_op->written,
&sent, write_op->flags);
/* Can't do this. We only clear "op" after we're finished sending
* the whole buffer.
*/
//write_op->op = 0;
} else if (write_op->op == PJ_IOQUEUE_OP_SEND_TO) {
int retry = 2;
while (--retry >= 0) {
send_rc = pj_sock_sendto(h->fd,
write_op->buf+write_op->written,
&sent, write_op->flags,
&write_op->rmt_addr,
write_op->rmt_addrlen);
#if defined(PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT) && \
PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT!=0
/* Special treatment for dead UDP sockets here, see ticket #1107 */
if (send_rc==PJ_STATUS_FROM_OS(EPIPE) && !IS_CLOSING(h) &&
h->fd_type==pj_SOCK_DGRAM())
{
PJ_PERROR(4,(THIS_FILE, send_rc,
"Send error for socket %ld, retrying",
h->fd));
send_rc = replace_udp_sock(h);
continue;
}
#endif
break;
}
/* Can't do this. We only clear "op" after we're finished sending
* the whole buffer.
*/
//write_op->op = 0;
} else {
pj_assert(!"Invalid operation type!");
write_op->op = PJ_IOQUEUE_OP_NONE;
send_rc = PJ_EBUG;
}
if (send_rc == PJ_SUCCESS) {
write_op->written += sent;
} else {
pj_assert(send_rc > 0);
write_op->written = -send_rc;
}
/* Are we finished with this buffer? */
if (send_rc!=PJ_SUCCESS ||
write_op->written == (pj_ssize_t)write_op->size ||
h->fd_type == pj_SOCK_DGRAM())
{
pj_bool_t has_lock;
write_op->op = PJ_IOQUEUE_OP_NONE;
if (h->fd_type != pj_SOCK_DGRAM()) {
/* Write completion of the whole stream. */
pj_list_erase(write_op);
/* Clear operation if there's no more data to send. */
if (pj_list_empty(&h->write_list))
ioqueue_remove_from_set(ioqueue, h, WRITEABLE_EVENT);
}
/* Unlock; from this point we don't need to hold key's mutex
* (unless concurrency is disabled, which in this case we should
* hold the mutex while calling the callback) */
if (h->allow_concurrent) {
/* concurrency may be changed while we're in the callback, so
* save it to a flag.
*/
has_lock = PJ_FALSE;
pj_ioqueue_unlock_key(h);
PJ_RACE_ME(5);
} else {
has_lock = PJ_TRUE;
}
/* Call callback. */
if (h->cb.on_write_complete && !IS_CLOSING(h)) {
(*h->cb.on_write_complete)(h,
(pj_ioqueue_op_key_t*)write_op,
write_op->written);
}
if (has_lock) {
pj_ioqueue_unlock_key(h);
}
} else {
pj_ioqueue_unlock_key(h);
}
/* Done. */
} else {
/*
* This is normal; execution may fall here when multiple threads
* are signalled for the same event, but only one thread eventually
* able to process the event.
*/
pj_ioqueue_unlock_key(h);
return PJ_FALSE;
}
return PJ_TRUE;
}
From the code above it is clear that, when the send completes, the on_write_complete callback stored in the pj_ioqueue_key_t is invoked. That callback was installed when the pj_ioqueue_key_t was registered with the ioqueue; a hedged sketch of that registration follows.
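For completeness, here is a hedged sketch of where that callback gets installed: when a transport creates its socket, it fills a pj_ioqueue_callback and registers the socket with the ioqueue. This mirrors what sip_transport_udp.c does for its UDP socket; the names register_udp_sock, my_on_read_complete, and my_on_write_complete are illustrative.
#include <pjlib.h>
static void my_on_read_complete(pj_ioqueue_key_t *key,
                                pj_ioqueue_op_key_t *op_key,
                                pj_ssize_t bytes_read)
{
    /* Incoming data would be parsed here (the SIP transport hands it to
     * the endpoint / transport manager). */
    PJ_UNUSED_ARG(key); PJ_UNUSED_ARG(op_key); PJ_UNUSED_ARG(bytes_read);
}
static void my_on_write_complete(pj_ioqueue_key_t *key,
                                 pj_ioqueue_op_key_t *op_key,
                                 pj_ssize_t bytes_sent)
{
    /* This is the hook reached from ioqueue_dispatch_write_event() above
     * (or skipped entirely when the fast path in pj_ioqueue_sendto()
     * succeeds). */
    PJ_UNUSED_ARG(key); PJ_UNUSED_ARG(op_key); PJ_UNUSED_ARG(bytes_sent);
}
static pj_status_t register_udp_sock(pj_pool_t *pool,
                                     pj_ioqueue_t *ioqueue,
                                     pj_sock_t sock,
                                     void *user_data,
                                     pj_ioqueue_key_t **p_key)
{
    pj_ioqueue_callback ioqueue_cb;
    /* Only the callbacks we need are set; the rest stay NULL. */
    pj_bzero(&ioqueue_cb, sizeof(ioqueue_cb));
    ioqueue_cb.on_read_complete  = &my_on_read_complete;
    ioqueue_cb.on_write_complete = &my_on_write_complete;
    /* The returned key is what pj_ioqueue_sendto()/pj_ioqueue_recvfrom()
     * operate on, and it is the same key handed back to the callbacks. */
    return pj_ioqueue_register_sock(pool, ioqueue, sock, user_data,
                                    &ioqueue_cb, p_key);
}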