EasyRTSPServer流媒体服务器基于live555改造而来,live555天生是个单线程, 用在摄像机上效率没有问题,毕竟连接数不多,这已经在EasyIPCamera中得到验证. 如果用在NVR或流媒体服务器时,基于性能考量,则必须要改造为多线程。
这是一个比较困难的过程, 多线程的处理主要集中在GenericMediaServer, 并且要将多线程中的UsageEnvironment传递到各个子模块, 而不是单线程中的envir()就行了,具体参考下面的代码和注释。
多线程的结构体定义在GenericMediaServer中;
在创建GenericMediaServer时,创建多线程对象,但并不创建实际线程, 实际代码如下;
// GenericMediaServer constructor.
//
// Stores the listening sockets (IPv4 always; IPv6 only when a usable socket
// was handed in), creates the hash tables that track server media sessions,
// client connections and client sessions, and registers the incoming-
// connection handlers on the main scheduler.
//
// When LIVE_MULTI_THREAD_ENABLE is defined it also pre-builds the worker
// pool descriptors: one sub task-scheduler + sub UsageEnvironment per slot.
// NOTE: only the descriptors are created here — the OS threads themselves
// are started lazily (see GetEnvBySuffix) when a channel gets its first
// client.
GenericMediaServer
::GenericMediaServer(UsageEnvironment& env, int ourSocketV4, int ourSocketV6, Port ourPort,
unsigned reclamationSeconds, void *_callback, void *_userptr)
: Medium(env),
fServerSocket4(ourSocketV4), fServerSocket6(ourSocketV6),
fServerPort(ourPort), fReclamationSeconds(reclamationSeconds),
fServerMediaSessions(HashTable::create(STRING_HASH_KEYS)),
fClientConnections(HashTable::create(ONE_WORD_HASH_KEYS)),
fClientSessions(HashTable::create(STRING_HASH_KEYS)) {
ignoreSigPipeOnSocket(fServerSocket4); // so that clients on the same host that are killed don't also kill us
if (fServerSocket6 > 0)
{
ignoreSigPipeOnSocket(fServerSocket6); // so that clients on the same host that are killed don't also kill us
}
#ifdef LIVE_MULTI_THREAD_ENABLE
InitMutex(&mutexServerMediaSession);
InitMutex(&mutexClientConnection);
InitMutex(&mutexClientSession);
mCallbackPtr = _callback;
mUserPtr = _userptr;
// Zero the pool descriptor, then allocate and zero one task slot per worker.
memset(&multiThreadCore, 0x00, sizeof(MultiThread_CORE_T));
multiThreadCore.threadNum = MAX_DEFAULT_MULTI_THREAD_NUM;
multiThreadCore.threadTask = new LIVE_THREAD_TASK_T[multiThreadCore.threadNum];
memset(&multiThreadCore.threadTask[0], 0x00, sizeof(LIVE_THREAD_TASK_T) * multiThreadCore.threadNum);
for (int i=0; i<multiThreadCore.threadNum; i++)
{
char szName[36] = {0};
// snprintf (not sprintf): bounded write, so a large worker index can
// never overrun szName.
snprintf(szName, sizeof(szName), "worker thread %d", i);
multiThreadCore.threadTask[i].id = i;
multiThreadCore.threadTask[i].extPtr = this; // back-pointer to this server for the worker proc
#ifdef _EPOLL_
multiThreadCore.threadTask[i].pSubScheduler = BasicTaskSchedulerEpoll::createNew(i+1, MAX_EPOLL_WORKER_THREAD_EVENT);
#else
multiThreadCore.threadTask[i].pSubScheduler = BasicTaskScheduler::createNew(i+1, MAX_EPOLL_WORKER_THREAD_EVENT);
#endif
// Each worker gets its own UsageEnvironment bound to its own scheduler;
// sub-modules must use this env instead of the single-threaded envir().
multiThreadCore.threadTask[i].pSubEnv = BasicUsageEnvironment::createNew(*multiThreadCore.threadTask[i].pSubScheduler, i+1, szName);
}
#endif
// Arrange to handle connections from others:
env.taskScheduler().turnOnBackgroundReadHandling(fServerSocket4, incomingConnectionHandler4, this);
if (fServerSocket6 > 0)
{
env.taskScheduler().turnOnBackgroundReadHandling(fServerSocket6, incomingConnectionHandler6, this);
}
}
在收到DESCRIBE请求后,再根据URL中的后缀,创建相应线程, 代码如下:
// Map an RTSP URL suffix (stream name) to a worker-thread UsageEnvironment.
//
// Called after a DESCRIBE request to decide which worker thread serves the
// stream named by urlSuffix:
//   * if a pool slot already serves this suffix, the connection joins that
//     channel (all clients of the same stream share one worker/env);
//   * otherwise the first free slot is claimed, its OS thread is created
//     lazily on first use, and the suffix is recorded in the slot.
//
// Parameters:
//   pMainThreadEnv          - the caller's (main-thread) environment; used
//                             here only for its name in lock tracing.
//   urlSuffix               - stream name from the request URL; must be
//                             non-empty or NULL is returned.
//   pClientConnection       - the requesting ClientConnection, passed as
//                             void* and cast back below.
//   pThreadTask             - optional out-parameter; on success receives a
//                             pointer to the chosen pool slot.
//   bLockServerMediaSession - when True the whole lookup runs under the
//                             server-media-session lock.
//
// Returns: the sub-thread UsageEnvironment now serving this suffix, with its
// reference count incremented and pClient->pClientConnectionEnv set; or NULL
// when the channel is being torn down, not ready, locked, or full.
UsageEnvironment *GenericMediaServer::GetEnvBySuffix(UsageEnvironment *pMainThreadEnv, const char *urlSuffix, void *pClientConnection,
LIVE_THREAD_TASK_T **pThreadTask, Boolean bLockServerMediaSession)
{
GenericMediaServer::ClientConnection *pClient = (GenericMediaServer::ClientConnection *)pClientConnection;
int iFreeIdx = -1;                 // first unused pool slot seen while scanning
UsageEnvironment *pEnv = NULL;     // result; non-NULL only on success
if ( (int)strlen(urlSuffix) < 1)
{
return NULL;
}
char streamName[512] = {0};
int iProcRet = 0;                  // <0 records why the lookup failed
Boolean bRequestTooMany = False;
if (bLockServerMediaSession) LockServerMediaSession(pMainThreadEnv->GetEnvirName(), (char*)"GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);
// do { ... } while(0): structured-goto idiom — every 'break' below jumps to
// the common unlock/return path at the bottom.
do
{
for (int i=0; i<multiThreadCore.threadNum; i++)
{
// Remember the first slot that has no suffix assigned and is not
// mid-release, in case no existing channel matches.
if ( (iFreeIdx<0) && (((int)strlen(multiThreadCore.threadTask[i].liveURLSuffix) < 1 )) && (multiThreadCore.threadTask[i].releaseChannel==0x00) )
{
iFreeIdx = i;
}
if ( 0 == strcmp(urlSuffix, multiThreadCore.threadTask[i].liveURLSuffix))
{
// Suffix matches an existing channel — validate it before joining.
// releaseChannel != 0 appears to mark a channel mid-teardown —
// the client must retry later (TODO confirm semantics).
if (multiThreadCore.threadTask[i].releaseChannel>0x00)
{
iProcRet = -1;
_TRACE(TRACE_LOG_DEBUG, (char *)"[%s] 当前通道正在被删除. 请稍候访问: %s\n", multiThreadCore.threadTask[i].pSubEnv->GetEnvirName(), urlSuffix);
break;
}
if (NULL == multiThreadCore.threadTask[i].pSubEnv)
{
iProcRet = -2;
break;
}
// GetStreamStatus() == 0 presumably means the stream is not (yet)
// running on this env — treat as not joinable (TODO confirm).
if (multiThreadCore.threadTask[i].pSubEnv->GetStreamStatus() == 0x00)
{
iProcRet = -3;
break;
}
// Take the per-env lock before touching the slot's client list.
multiThreadCore.threadTask[i].pSubEnv->LockEnvir("GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);
if (multiThreadCore.threadTask[i].pSubEnv->GetLockFlag() != 0x00)
{
iProcRet = -4;
multiThreadCore.threadTask[i].pSubEnv->UnlockEnvir("GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);
break;
}
// Find a free client slot in this channel's connection table.
bool assignEnv = false;
for (int k=0; k<MAX_BATCH_CLIENT_NUM; k++)
{
if (NULL == multiThreadCore.threadTask[i].pClientConnectionPtr[k])
{
assignEnv = true;
multiThreadCore.threadTask[i].pClientConnectionPtr[k] = pClient;
_TRACE(TRACE_LOG_INFO, (char*)"GenericMediaServer::GetEnvBySuffix [%s] set [%d] to Index[%d]\n", urlSuffix, pClient->fOurSocket, k);
strcpy(streamName, urlSuffix);
break;
}
}
if (assignEnv)
{
// Joined an existing channel: hand its env to the client and
// bump the bookkeeping.
pEnv = multiThreadCore.threadTask[i].pSubEnv;
//multiThreadCore.threadTask[i].subSocket = pClient->fOurSocket;
pClient->pClientConnectionEnv = multiThreadCore.threadTask[i].pSubEnv;
//multiThreadCore.threadTask[i].handleDescribe = 0x01;
//*handleDescribe = &multiThreadCore.threadTask[i].handleDescribe;
if (NULL != pThreadTask) *pThreadTask = &multiThreadCore.threadTask[i];
multiThreadCore.threadTask[i].clientNum ++;
pEnv->IncrementReferenceCount(); // bump the env's reference count
//_TRACE(TRACE_LOG_WARNING, (char*)"######## pEnv->IncrementReferenceCount 增加引用计数[%d].\n", pEnv->GetReferenceCount());
iProcRet = 0;
_TRACE(TRACE_LOG_INFO, (char*)"共用通道GenericMediaServer::GetEnvBySuffix:: Channel already exist. New Connection[%d] [%s][%s] ClientNum[%d]\n",
pClient->fOurSocket, pClient->pClientConnectionEnv->GetEnvirName(), urlSuffix,
multiThreadCore.threadTask[i].clientNum);
}
else
{
// No free slot in the client list — this channel is full.
iProcRet = -10;
_TRACE(TRACE_LOG_ERROR, (char*)"GenericMediaServer::GetEnvBySuffix 当前通道客户端已满[%s]\n", urlSuffix);
}
multiThreadCore.threadTask[i].pSubEnv->UnlockEnvir("GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);
break;
}
}
if (pEnv) break;       // joined an existing channel — done
if (iFreeIdx<0) break; // no matching channel and no free slot — fail (NULL)
if (iProcRet < 0) break; // matching channel existed but was unusable — fail
// New channel: start the slot's OS thread on first use only; threads are
// kept alive afterwards for reuse (see the note at the end of the file).
if (NULL == multiThreadCore.threadTask[iFreeIdx].osThread)
{
CreateOSThread( &multiThreadCore.threadTask[iFreeIdx].osThread, __WorkerThread_Proc, (void *)&multiThreadCore.threadTask[iFreeIdx] );
}
multiThreadCore.threadTask[iFreeIdx].pClientConnectionPtr[0] = pClient;
//multiThreadCore.threadTask[iFreeIdx].subSocket = pClient->fOurSocket;
pClient->pClientConnectionEnv = multiThreadCore.threadTask[iFreeIdx].pSubEnv;
pEnv = pClient->pClientConnectionEnv;
// Claim the slot by recording the suffix it now serves.
strcpy(multiThreadCore.threadTask[iFreeIdx].liveURLSuffix, urlSuffix);
strcpy(streamName, multiThreadCore.threadTask[iFreeIdx].liveURLSuffix);
pEnv->IncrementReferenceCount(); // bump the env's reference count
//_TRACE(TRACE_LOG_WARNING, (char*)"######## pEnv->IncrementReferenceCount 初始引用计数为%d.\n", pEnv->GetReferenceCount());
//envir().taskScheduler().disableBackgroundHandling(pClient->fOurSocket);
//pClient->pEnv->taskScheduler().turnOnBackgroundReadHandling(pClient->fOurSocket, (TaskScheduler::BackgroundHandlerProc*)&GenericMediaServer::ClientConnection::incomingRequestHandler, this);
//multiThreadCore.threadTask[iFreeIdx].handleDescribe = 0x01;
//*handleDescribe = &multiThreadCore.threadTask[iFreeIdx].handleDescribe;
if (NULL != pThreadTask) *pThreadTask = &multiThreadCore.threadTask[iFreeIdx];
multiThreadCore.threadTask[iFreeIdx].clientNum ++;
_TRACE(TRACE_LOG_INFO, (char*)"新建通道 GenericMediaServer::GetEnvBySuffix New Connection[%d] [%s][%s] ClientNum[%d]\n",
pClient->fOurSocket, pClient->pClientConnectionEnv->GetEnvirName(),
multiThreadCore.threadTask[iFreeIdx].liveURLSuffix,
multiThreadCore.threadTask[iFreeIdx].clientNum);
}while (0);
if (bLockServerMediaSession) UnlockServerMediaSession(pMainThreadEnv->GetEnvirName(), "GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);
return pEnv;
}
这样, 每个不同的请求将分配给对应的线程进行处理,线程之间互不干扰;
在某一个通道对应的所有客户端都断开后, 该通道资源将被回收, 但相应线程不会被删除, 一方面是为了再次复用, 另一方面是为了稳定性考量;