C++ / Python shared memory

Published: 2025-07-07

一、Purpose

The goal is for C++ to read and decode the video and pass the frames to Python, since Python is very convenient for testing. C++ and Python must agree on a protocol: all decoding is done on the C++ side, and the decoded frames are handed to Python through shared memory.
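As a quick orientation, the wire protocol between the two processes (implemented later by the tcpclient class and the Python server) boils down to a short handshake, one info packet and a per-frame block index. The summary below is only a reading aid; the field names are illustrative:

// Handshake: C++ sends "ready" (5 bytes), Python replies "READY".
// Info packet, sent once by C++ (integers little-endian), see tcpclient::writeInfo:
//   uint32 total_len;     // 2*3 + strlen(share_name)
//   uint16 width;         // decoded frame width
//   uint16 height;        // decoded frame height
//   uint16 blocks;        // number of shared-memory blocks of w*h*3 bytes each
//   char   share_name[];  // name of the Windows file mapping
// Per frame: C++ sends a uint16 block index (tcpclient::write_signal);
// Python reads that block from the mapping and replies "ready" when it is done.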

二、The main class

The main class ties the decoder into the pipeline. Note that the decoder does not hand frames over directly in GPU memory; after decoding, the frames are transferred to system memory and passed from there to Python.

#pragma once

#define __STDC_CONSTANT_MACROS
#define SDL_MAIN_HANDLED

#include "c_ringbuffer.h"
#include "TThreadRunable.h"
//#include "FFMPEGLibrary.h"
#include "c_script.h"
#include "memshare.h"
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/pixdesc.h>
#include <libavutil/hwcontext.h>
#include <libavutil/opt.h>
#include <libavutil/avassert.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
#include "SDL2\SDL.h"
#include "SDL2\sdl_ttf.h"
};
#include <opencv2/opencv.hpp>
#include "c_plug.h"
#include "IO_Thread.h"
//#include  "process.h"



#pragma execution_character_set("utf-8")

static void SaveRgb2Bmp(char* rgbbuf, int width, int height, char *file)
{
	BITMAPINFO bitmapinfo;
	ZeroMemory(&bitmapinfo, sizeof(BITMAPINFO));
	bitmapinfo.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
	bitmapinfo.bmiHeader.biWidth = width;
	bitmapinfo.bmiHeader.biHeight = 0 - height;
	bitmapinfo.bmiHeader.biPlanes = 1;
	bitmapinfo.bmiHeader.biBitCount = 24;
	bitmapinfo.bmiHeader.biXPelsPerMeter = 0;
	bitmapinfo.bmiHeader.biYPelsPerMeter = 0;
	bitmapinfo.bmiHeader.biSizeImage = width * height * 3;
	bitmapinfo.bmiHeader.biClrUsed = 0;
	bitmapinfo.bmiHeader.biClrImportant = 0;

	BITMAPFILEHEADER bmpHeader;
	ZeroMemory(&bmpHeader, sizeof(BITMAPFILEHEADER));
	bmpHeader.bfType = 0x4D42;
	bmpHeader.bfOffBits = sizeof(BITMAPINFOHEADER) + sizeof(BITMAPFILEHEADER);
	bmpHeader.bfSize = bmpHeader.bfOffBits + width * height * 3;

	//fopen("", "wb");
	FILE* fp;
	fopen_s(&fp, file, "wb");
	if (fp)
	{
		fwrite(&bmpHeader, 1, sizeof(BITMAPFILEHEADER), fp);
		fwrite(&(bitmapinfo.bmiHeader), 1, sizeof(BITMAPINFOHEADER), fp);
		fwrite(rgbbuf, 1, width*height * 3, fp);
		fclose(fp);
	}
}
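
// Usage sketch (illustrative): dump one decoded BGR24 frame to disk for debugging, e.g.
//   SaveRgb2Bmp((char*)pFrameDst->data[0], pFrameDst->width, pFrameDst->height, "dump.bmp");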

typedef  std::function<void(cv::Mat)> funcmat_process;

typedef struct func_process
{
	//name used to identify this function in the processing chain
	std::string funcname;
	funcmat_process v_func_callback = NULL;
	//func_process* next;
}func_process;



class c_SDLDrawer 
{

	int					m_w = 0, m_h = 0;
	SDL_Window			*screen = NULL;
	SDL_Renderer		*sdlRenderer = NULL;
	SDL_Texture			*sdlTexture = NULL;

	bool m_window_init = false;

	TTF_Font * _font = NULL;


	lfringqueue<AVFrame, 20>* v_pframes = NULL;
	//canvas frame
	AVFrame * v_canvas_frame = NULL;

	void draw(uint8_t *data[], int linesize[], int px, int py, int w, int h, int bias)
	{

		if (sdlTexture == NULL)
		{
			sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV,
				SDL_TEXTUREACCESS_STREAMING, px, py);

		}
		SDL_UpdateYUVTexture(sdlTexture, NULL,
			data[0], linesize[0],
			data[1], linesize[1],
			data[2], linesize[2]);
		if (bias != 0)
		{
			SDL_Rect			sdlRect;
			//zoom in: crop bias pixels from every edge of the source
			sdlRect.x = bias;
			sdlRect.y = bias;
			sdlRect.w = w - 2 * bias;
			sdlRect.h = h - 2 * bias;// nh;
			SDL_RenderCopy(sdlRenderer, sdlTexture, &sdlRect, NULL);
		}
		else
		{
			//SDL_RenderClear(sdlRenderer);
			SDL_RenderCopy(sdlRenderer, sdlTexture, NULL, NULL);
		}
		SDL_RenderPresent(sdlRenderer);

	}

	//px: horizontal shift in pixels (negative shifts left)
	//py: vertical shift in pixels (negative shifts up)
	void drawBGR(uint8_t *data, int w, int h, int px, int py, int drawrect)
	{
		if (sdlTexture == NULL)
		{
			sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_BGR24,
				SDL_TEXTUREACCESS_STREAMING, w, h);


		}
		SDL_UpdateTexture(sdlTexture, NULL, data, w * 3);

		if (v_after_cut.w != 0) // cropping is required
		{
			if (px != 0 || py != 0) //shift after cropping
			{
				int w_, h_;
				SDL_GetWindowSize(screen, &w_, &h_);
				SDL_Rect dstrect = { 0,0,w_,h_ };
				dstrect.x += px;
				dstrect.y += py;
				SDL_RenderClear(sdlRenderer);
				SDL_RenderCopy(sdlRenderer, sdlTexture, &v_after_cut, &dstrect);
			}
			else
				SDL_RenderCopy(sdlRenderer, sdlTexture, &v_after_cut, NULL);
		}
		else
		{
			if (px != 0 || py != 0)
			{
				int w_, h_;
				SDL_GetWindowSize(screen, &w_, &h_);
				SDL_Rect dstrect = { 0,0,w_,h_ };
				dstrect.x += px;
				dstrect.y += py;
				SDL_RenderClear(sdlRenderer);
				SDL_RenderCopy(sdlRenderer, sdlTexture, NULL, &dstrect);
			}
			else
			{
				SDL_RenderCopy(sdlRenderer, sdlTexture, NULL, NULL);
			}
		}

		/*if (px != 0 || py != 0)
		{
			int w_, h_;
			SDL_GetWindowSize(screen, &w_, &h_);
			SDL_Rect dstrect = { 0,0,w_,h_ };
			dstrect.x += px;
			dstrect.y += py;
			SDL_RenderClear(sdlRenderer);
			SDL_RenderCopy(sdlRenderer, sdlTexture, NULL, &dstrect);
		}
		else
		{
			int res = v_after_cut.w*v_after_cut.h;
			if (res == 0)
			{
				SDL_RenderCopy(sdlRenderer, sdlTexture, NULL, NULL);
			}
			else
			{
				SDL_RenderCopy(sdlRenderer, sdlTexture, &v_after_cut, NULL);
			}
			
		}*/
	
		SDL_Color color = { 255,255,255,255 };

		string show = "this is a test钱波";
		if (drawrect)
			SDL_RenderDrawRect(this->sdlRenderer, NULL);
		displayText("this is a test钱波", { 10,10 },color);
		color = { 255,0,0,255 };
		displayText("集装箱", { 500,130 },color);

		SDL_RenderPresent(sdlRenderer);
	}
	
	bool displayText(std::string text, SDL_Point point,SDL_Color color)
	{
		if (_font == NULL)
			return false;
		SDL_Surface * surface = TTF_RenderUTF8_Blended(_font, text.c_str(), color);
		SDL_Texture * texture = SDL_CreateTextureFromSurface(sdlRenderer, surface);
		int w = 0, h = 0;
		TTF_SizeUTF8(_font, text.c_str(), &w, &h);
		SDL_Rect des = { point.x, point.y, w, h };
		SDL_RenderCopy(sdlRenderer, texture, NULL, &des);
		SDL_FreeSurface(surface);
		SDL_DestroyTexture(texture);
		return true;
#if 0
		TTF_SetFontOutline(_font, 1);
		int w = 0;
		int h = 0;

		if ((TTF_SizeText(_font, "SDL_ttf is awesome!", &w, &h) != -1)) {
			// Print out the width and height of the string if I render it with myFont
			std::cout << "Width : " << w << "\nHeight: " << h << std::endl;
		}
		else {
			// Error...
		}
#endif

	}

public:
	int func_init(const void* hWnd, lfringqueue<AVFrame, 20>* frame)
	{
		v_pframes = frame;
		//v_hwnd = hWnd;
		if (m_window_init == false)
		{
			TTF_Init();
			if (_font == NULL)
			{
				_font = TTF_OpenFont("./STXIHEI.TTF", 20);
				if (_font == NULL) //the font could not be loaded
					return -1;
			}
			/* set the font style (bold | italic) */
			TTF_SetFontStyle(_font, TTF_STYLE_BOLD /*| TTF_STYLE_ITALIC*/);

			SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "1");
			//SDL_setenv(SDL_HINT_RENDER_SCALE_QUALITY, "linear", 0);
			SDL_Init(SDL_INIT_VIDEO);
			if (hWnd == NULL)
			{
				screen = SDL_CreateWindow("FF", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
					1920, 1000, SDL_WINDOW_SHOWN/* SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE*/);
			}
			else
			{
				screen = SDL_CreateWindowFrom(hWnd);
			}

			if (screen == NULL)
			{
				//printf("Window could not be created! SDL_Error: %s\n", SDL_GetError());
				return -1;
			}
			sdlRenderer = SDL_CreateRenderer(screen, 0, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
			//sdlRenderer = SDL_CreateRenderer(screen, -1, SDL_RENDERER_ACCELERATED);
			//SDL_SetHint(SDL_HINT_RENDER_DRIVER, "opengl");

			m_window_init = true;

		}
		//set the renderer draw color used for rectangles (black, semi-transparent)
		SDL_SetRenderDrawColor(sdlRenderer,   // renderer
			0,     // red
			0,     // green
			0,     // blue
			128);  // alpha


		return 0;
	}


	//interface function
	//w: width
	//h: height
	//bias: crop offset in pixels
	//drawrect: whether to draw the outer selection rectangle
	void func_drawYUV(int w, int h, int bias, int drawrect)
	{
		AVFrame* frame = NULL;
		if (v_pframes->dequeue(&frame))
		{
			draw(frame->data, frame->linesize, frame->width, frame->height, w, h, bias);
			av_freep(&frame->data[0]);
			av_frame_free(&frame);
		}
	}
	
	//px: number of pixels to add or subtract along the x axis
	//py: number of pixels to add or subtract along the y axis
	void func_draw(int px, int py, AVFrame* frame,int drawrect)
	{

		if (px < 0)
			v_cutleft = px;//record the left 
		if (px > 0)
			v_cutright = px; //record the right
		if (py < 0)
			v_cuttop = py;
		if (py > 0)
			v_cutbottom = py;
		
		drawBGR(frame->data[0], frame->width, frame->height, px, py, drawrect);
	}


	int v_cutleft = 0;
	int v_cutright = 0;
	//int v_cutleftright = 0;//1 切除left  2 切除right  3 切除左右 

	int v_cuttop = 0;
	int v_cutbottom = 0;
	//int v_cuttopbottom = 0;//1 切除top  2 切除bottom  3 切除上下
	//rect after the black borders are cropped; must be computed
	SDL_Rect v_after_cut = {0,0,0,0};
	void func_cut(int windowx,int windowy, int imagew, int imageh)
	{
		if (v_after_cut.w == 0 || v_after_cut.h == 0)
		{
			v_after_cut.w = imagew;
			v_after_cut.h = imageh;
		}
		float w_ = (float)windowx;
		float h_ = (float)windowy;
		//int w_, h_;
		//SDL_GetWindowSize(screen, &w_, &h_);

		//px / ppx = window_w / imagew;

		if (v_cutleft < 0) //the left side needs cropping
		{
			int tmpx = v_cutleft * imagew;
			float pp = (float)tmpx / w_ ;
			pp = 0 - pp;
			int ppx = (int)(pp + 0.5);
			//scale the window-pixel offset to image pixels
			v_after_cut.x += ppx;
			v_after_cut.w -= ppx;
			v_cutleft = 0;
		}
		if (v_cutright > 0) //the right side needs cropping
		{
			int tmpx = v_cutright * imagew;
			float pp = (float)tmpx/w_;
			int ppx = (int)(pp + 0.5);
			v_after_cut.w -= ppx;
			v_cutright = 0;
		}

		if (v_cuttop < 0) //the top needs cropping
		{
			float tmpy = float(v_cuttop * imageh);
			float pp = tmpy / h_;
			int ppy = (int)(pp+0.5);
			v_after_cut.y += (0-ppy); 
			v_after_cut.h -= (0-ppy);
			v_cuttop = 0;
		}
		if (v_cutbottom > 0) //the bottom needs cropping
		{
			int tmpy = v_cutbottom * imageh;
			float pp = (float)tmpy / h_;
			int ppy = (int)(pp + 0.5);
			v_after_cut.h -= ppy;
			v_cutbottom = 0;
		}
	}
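
	// Worked example of the ratio above (illustrative numbers): with a 1920-px-wide
	// window and a 3840-px-wide decoded image, dragging the picture 100 window pixels
	// to the left stores v_cutleft = -100, and func_cut converts that to image pixels:
	//   ppx = (int)(100.0f * 3840 / 1920 + 0.5f) = 200
	// so v_after_cut.x grows by 200 and v_after_cut.w shrinks by 200 source pixels.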


	void func_restore()
	{
		//restore all values to their defaults
		v_after_cut = { 0, 0, 0, 0};
		v_cutleft = 0;
		v_cutright = 0;
		v_cuttop = 0;
		v_cutbottom = 0;
	}
public:

	//draw a rectangle on the image to indicate the selected state
	void func_draw_rect()
	{
		SDL_RenderDrawRect(this->sdlRenderer, NULL);
	}

	void UnInit()
	{
		if (sdlTexture)
			SDL_DestroyTexture(sdlTexture);

		if (sdlRenderer)
			SDL_DestroyRenderer(sdlRenderer);

		if (screen)
		{
			SDL_DestroyWindow(screen);
			//SDL_ShowWindow(_pWindow);
			//if (_hWnd!=NULL)
			//	ShowWindow(_hWnd, 1);
			screen = NULL;
		}
		if (_font != NULL) {
			TTF_CloseFont(_font);
			_font = NULL;
		}
		TTF_Quit();
		SDL_Quit();
	}

	c_SDLDrawer()
	{

	}

	~c_SDLDrawer()
	{
		UnInit();
	}

};

//SDL_FillRect(gScreenSurface, NULL, SDL_MapRGB(gScreenSurface->format, 0xFF, 0x00, 0x00));


static enum AVPixelFormat hw_pix_fmt = AV_PIX_FMT_DXVA2_VLD;

static enum AVPixelFormat get_hw_format(AVCodecContext* ctx,
	const enum AVPixelFormat* pix_fmts)
{
	const enum AVPixelFormat* p;

	for (p = pix_fmts; *p != -1; p++) {
		if (*p == hw_pix_fmt)
			return *p;
	}

	fprintf(stderr, "Failed to get HW surface format.\n");
	return AV_PIX_FMT_NONE;
}

typedef enum enum_record
{
	enum_record_none,
	enum_record_write,
	enum_record_write_lock,
	enum_record_write_unlock
}enum_record;

class c_AVDecoder :public TThreadRunable
{
	videoInType v_type = en_mode_client; //e.g. rtsp
	AVPixelFormat sourcepf = AV_PIX_FMT_NV12;// AV_PIX_FMT_NV12;// AV_PIX_FMT_YUV420P;
	AVPixelFormat destpf = AV_PIX_FMT_BGR24; //AV_PIX_FMT_YUV420P

	AVBufferRef* hw_device_ctx = NULL;
	lfringqueue<AVFrame, 20>* v_pframes = NULL;

	struct SwsContext* img_convert_ctx = NULL;
	AVFormatContext* input_ctx = NULL;
	//recording
	AVFormatContext* output_ctx = NULL;
	//whether a keyframe has already been seen
	int v_FindKeyPkt = 0;


	int stream_video_num;
	int stream_audio_num;
	AVStream* stream_video = NULL;
	AVStream* stream_audio = NULL;

	AVCodecContext* decoder_ctx = NULL;
	AVCodec* decoder = NULL;

	AVPacket packet;
	
	enum AVHWDeviceType type;
	//convert to yuv420 or rgb

	//client mode by default
	int v_clientserver = 0;

	int hw_decoder_init(AVCodecContext* ctx, const enum AVHWDeviceType type)
	{
		int err = 0;

		if ((err = av_hwdevice_ctx_create(&hw_device_ctx, type,
			NULL, NULL, 0)) < 0) {
			fprintf(stderr, "Failed to create specified HW device.\n");
			return err;
		}
		ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);

		return err;
	}

	int decode_write(AVCodecContext* avctx, AVPacket* packet)
	{
		AVFrame* frame = NULL, *sw_frame = NULL;
		AVFrame* tmp_frame = NULL;
		AVFrame* pFrameDst = NULL;
		unsigned char* out_buffer = NULL;
		int ret = 0;
		ret = avcodec_send_packet(avctx, packet);
		if (ret < 0) {
			fprintf(stderr, "Error during decoding\n");
			return ret;
		}
		static int i = 0;
		while (1) {
			if (!(frame = av_frame_alloc()) || !(sw_frame = av_frame_alloc())) {
				fprintf(stderr, "Can not alloc frame\n");
				ret = AVERROR(ENOMEM);
				goto fail;
			}
			//avctx->get_buffer2
			ret = avcodec_receive_frame(avctx, frame);
			if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
				av_frame_free(&frame);
				av_frame_free(&sw_frame);
				return 0;
			}
			else if (ret < 0) {
				fprintf(stderr, "Error while decoding\n");
				goto fail;
			}
			if (img_convert_ctx == NULL)
			{
				img_convert_ctx = sws_getContext(avctx->width, avctx->height, sourcepf,
					avctx->width, avctx->height, destpf, SWS_FAST_BILINEAR, NULL, NULL, NULL);
			}
			if (frame->format == hw_pix_fmt) {
				/* retrieve data from GPU to CPU */
				sw_frame->format = sourcepf; // AV_PIX_FMT_NV12;// AV_PIX_FMT_YUV420P;// AV_PIX_FMT_NV12;// AV_PIX_FMT_BGR24;// AV_PIX_FMT_YUV420P;
				if ((ret = av_hwframe_transfer_data(sw_frame, frame, 0)) < 0) {
					fprintf(stderr, "Error transferring the data to system memory\n");
					goto fail;
				}
				tmp_frame = sw_frame;
			}
			else
				tmp_frame = frame;

			pFrameDst = av_frame_alloc();
			av_image_alloc(pFrameDst->data, pFrameDst->linesize, avctx->width, avctx->height, destpf, 1);

			sws_scale(img_convert_ctx, tmp_frame->data, tmp_frame->linesize,
				0, avctx->height, pFrameDst->data, pFrameDst->linesize);


			pFrameDst->width = avctx->width;
			pFrameDst->height = avctx->height;
			if (!v_pframes->enqueue(pFrameDst, 3))
			{
				av_freep(&pFrameDst->data[0]);
				av_frame_free(&pFrameDst);
			}
		fail:
			av_frame_free(&frame);
			av_frame_free(&sw_frame);
			if (ret < 0)
				return ret;
		}
	}


	std::string v_url;
	std::string v_record_file;
	//enum_record_none: not recording; enum_record_write: recording
	enum_record v_record = enum_record_none;

private:
	int Record_Stream_Add();
	int Record_Stream_Open();
	int Record_Stream_Write(AVPacket* pkt);
	int Record_Stream_Close();
	int Record_Stream(AVPacket* pkt);
public:
	void func_init(const char* url, lfringqueue<AVFrame, 20>* frame = NULL)
	{
		v_url = url;
		v_pframes = frame;
		v_clientserver = 0;
	}
	int func_record_start(const char* file)
	{
		int ret = -1;
		if (file == NULL)
			return ret;

		switch (v_record)
		{
		case enum_record_none:
			v_record = enum_record_write;
			v_record_file = file;
			ret = 0;
			break;
		case enum_record_write://already writing: locked, func_record_close() is required first
			//v_record = enum_record_write_lock;
			ret = -1;
			break;
		case enum_record_write_lock://cannot be modified while locked
			ret = -2;
			break;
		case enum_record_write_unlock: //a previous file has finished writing
			v_record = enum_record_write;
			v_record_file = file;
			ret = 0;
			break;
		}
		return ret;
	}
	void func_record_close()
	{
		if (v_record == enum_record_none)
			return;
		if(v_record == enum_record_write)
			v_record = enum_record_write_lock;
		return;
	}
    //not driven by this thread's Run(); initialization is needed when another thread calls the decode functions
	int func_init(const char* url ,lfringqueue<AVFrame, 20>* frame, int codecID,uint8_t * extradata,int extradatalen)
	{
		v_url = url;
		v_pframes = frame;
		//0 is client mode
		//1 is server mode
		v_clientserver = 1;
		decoder = avcodec_find_decoder((AVCodecID)codecID);
		return 0;
	}

public:


	int func_cycle()
	{
		const char* stype = "dxva2";
		int ret = 0;
		type = av_hwdevice_find_type_by_name(stype);
		if (type == AV_HWDEVICE_TYPE_NONE) {
			fprintf(stderr, "Device type %s is not supported.\n", stype);
			fprintf(stderr, "Available device types:");
			while ((type = av_hwdevice_iterate_types(type)) != AV_HWDEVICE_TYPE_NONE)
				fprintf(stderr, " %s", av_hwdevice_get_type_name(type));
			fprintf(stderr, "\n");
			return -1;
		}

		/* open the input file */
		//const char * filename = "h:/video/a.mp4";
		AVDictionary* opts = NULL;
		av_dict_set(&opts, "rtsp_transport", "tcp", 0);
		av_dict_set(&opts, "buffer_size", "2048000", 0);
		av_dict_set(&opts, "fpsprobesize", "5", 0);
		//"-probesize 32 "
		av_dict_set(&opts, "analyzeduration", "5000000", 0);

		if (avformat_open_input(&input_ctx, v_url.c_str(), NULL, &opts) != 0) {
			av_dict_free(&opts);
			fprintf(stderr, "Cannot open input file '%s'\n", v_url.c_str());
			return -1;
		}
		av_dict_free(&opts);
		if (avformat_find_stream_info(input_ctx, NULL) < 0) {
			//av_dict_free(&opts);
			fprintf(stderr, "Cannot find input stream information.\n");
			return -1;
		}

		/* find the video stream information */
		stream_video_num = av_find_best_stream(input_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &decoder, 0);

		if (stream_video_num < 0) {
			//av_dict_free(&opts);
			fprintf(stderr, "Cannot find a video stream in the input file\n");
			return -1;
		}

		AVCodec* decoder_audio;
		stream_audio_num = av_find_best_stream(input_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &decoder_audio, 0);

		if (stream_audio_num < 0) {
			//av_dict_free(&opts);
			fprintf(stderr, "Cannot find an audio stream in the input file\n");
		}


		AVStream* stream = input_ctx->streams[stream_video_num];



		if (!(decoder_ctx = avcodec_alloc_context3(decoder)))
			return AVERROR(ENOMEM);

		stream_video = input_ctx->streams[stream_video_num];
		if (avcodec_parameters_to_context(decoder_ctx, stream_video->codecpar) < 0)
			return -1;

		decoder_ctx->get_format = get_hw_format;

		if (hw_decoder_init(decoder_ctx, type) < 0)
			return -1;


		if ((ret = avcodec_open2(decoder_ctx, decoder, NULL)) < 0) {
			fprintf(stderr, "Failed to open codec for stream #%u\n", stream_video_num);
			return -1;
		}

		while (ret >= 0) {

			if ((ret = av_read_frame(input_ctx, &packet)) < 0)
				break;

			if (stream_video_num == packet.stream_index)
			{
				//a more accurate per-frame timing calculation is needed here
				ret = decode_write(decoder_ctx, &packet);
			}
			else //audio packets, etc.
			{

			}
			Record_Stream(&packet);
			av_packet_unref(&packet);
			if (IsStop())
				break;
		}

		/* flush the decoder */
		packet.data = NULL;
		packet.size = 0;
		ret = decode_write(decoder_ctx, &packet);
		av_packet_unref(&packet);


		avcodec_free_context(&decoder_ctx);
		avformat_close_input(&input_ctx);
		av_buffer_unref(&hw_device_ctx);

		return 0;
	}

	queue<RTPPacket*> _queue;
	void push(uint8_t* data,int len)
	{
		RTPPacket* pkt = new RTPPacket();
		pkt->data = new uint8_t[len];
		memcpy(pkt->data,  data,len);
		pkt->len = len;
		_mutex.lock();
		if (_queue.size() > 20)
		{
			for (int i = 0; i < 20; i++)
			{
				RTPPacket* drop = _queue.front();
				delete[] drop->data;   //allocated with new[], so use delete[]
				delete drop;
				_queue.pop();
			}
		}
		_queue.push(pkt);
		_mutex.unlock();
	}
	bool v_keyframemeet = false;
	//video handling in server mode
	int func_cycle_server2()
	{
		const char* stype = "dxva2";
		type = av_hwdevice_find_type_by_name(stype);
		if (type == AV_HWDEVICE_TYPE_NONE) {
			fprintf(stderr, "Device type %s is not supported.\n", stype);
			fprintf(stderr, "Available device types:");
			while ((type = av_hwdevice_iterate_types(type)) != AV_HWDEVICE_TYPE_NONE)
				fprintf(stderr, " %s", av_hwdevice_get_type_name(type));
			fprintf(stderr, "\n");
			return -1;
		}
		
		for (int i = 0;; i++) {
			const AVCodecHWConfig* config = avcodec_get_hw_config(decoder, i);
			if (!config) {
				fprintf(stderr, "Decoder %s does not support device type %s.\n",
					decoder->name, av_hwdevice_get_type_name(type));
				return -1;
			}
			if (config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
				config->device_type == type) {
				hw_pix_fmt = config->pix_fmt;
				break;
			}
		}
		if (!(decoder_ctx = avcodec_alloc_context3(decoder)))
			return AVERROR(ENOMEM);
		decoder_ctx->get_format = get_hw_format;

		if (hw_decoder_init(decoder_ctx, type) < 0)
			return -1;
		int ret = 0;
		if ((ret = avcodec_open2(decoder_ctx, decoder, NULL)) < 0) {
			fprintf(stderr, "Failed to open codec for stream #%u\n", stream_video_num);
			return -1;
		}
		//AVFrame* frame = av_frame_alloc();
		int v_timeCount = 0;
		while (1)
		{
			bool need_sleep = 0;
			if (_stop == 1)
			{
				//qDebug()<< "IO thread break out";
				break;
			}
			
			_mutex.lock();
			need_sleep = _queue.empty();
			_mutex.unlock();

			if (need_sleep) //no packet 
			{
				std::this_thread::yield(); //yield the thread
				//std::this_thread::sleep_for(std::chrono::milliseconds(20));//rest for 20 milliseconds
				if (++v_timeCount > 50) //no packet received 50 times in a row
				{
					//qDebug()<<"IO thread broken break";
					v_timeCount = 0;
					//exit the thread automatically
					break;//the stream is broken: data is no longer arriving
				}
			}
			else
			{
				v_timeCount = 0;
				_mutex.lock();
				RTPPacket* pkt = _queue.front();//RTP packet queue
				_queue.pop();
				_mutex.unlock();
				if (!v_keyframemeet)
				{
					uint8_t* pos = pkt->data;
					while (!*(pos++));
					uint8_t flag = (*pos) & 0x1f;
					if (flag == 0x07)//only handles H.264 (NAL type 7 = SPS)
					{
						v_keyframemeet = 1;

						AVPacket avpkt;
						av_init_packet(&avpkt);
						avpkt.data = pkt->data;
						avpkt.size = pkt->len;
						decode_write(decoder_ctx, &avpkt);
						//decode_write2(decoder_ctx, &avpkt, frame);
					}
				}
				else
				{
					AVPacket avpkt;
					av_init_packet(&avpkt);
					avpkt.data = pkt->data;
					avpkt.size = pkt->len;
					decode_write(decoder_ctx, &avpkt);
					//decode_write2(decoder_ctx, &avpkt, frame);
				}
				delete[] pkt->data;   //allocated with new[] in push()
				delete pkt;
			}
		}
		return 0;

	}

	int func_get_decode_width()
	{
		if (decoder_ctx != NULL)
			return decoder_ctx->width;
		return 0;
	}
	int func_get_decode_height()
	{
		if (decoder_ctx != NULL)
			return decoder_ctx->height;
		return 0;
	}

	void Run()
	{
		while (1)
		{
			if (IsStop())
				break;
			//if (!IsStop())
			if (v_clientserver == 0)
				func_cycle();
			else
				func_cycle_server2();
		}
	}
	void func_stop()
	{
		Stop();
		Join();
	}
	c_AVDecoder()
	{}
	~c_AVDecoder()
	{
	
	}
};

//wires the decoder, the SDL drawer and the processing chain together
class c_av_func 
{
public:
	c_av_func()
	{

	}
	~c_av_func()
	{
		
	}
private:
	std::string v_url;
	lfringqueue<AVFrame, 20> v_frames;

	c_SDLDrawer v_drawer;
	c_AVDecoder v_decoder;
	//chained processing functions
	std::vector<func_process> v_funcs;
	std::mutex v_mutex;

	std::mutex v_mutex_cache;
	class Lock {
	private:
		std::lock_guard<std::mutex> v_lock;
		//std::unique_lock<std::mutex> m_lock;
		//	T *m_data;
	public:
		inline Lock(c_av_func* parent) : v_lock(parent->v_mutex)
		{}
	};
private:
	void func(cv::Mat nmat)
	{
		Lock lock(this);
		auto iter = v_funcs.begin();
		while (iter != v_funcs.end())
		{
			funcmat_process process = (*iter).v_func_callback;
			process(nmat);
			iter++;
		}
	}

public:
	//input started in client mode
	void func_init(const void* hWnd, const char* url)
	{
		v_url = url;
		v_drawer.func_init(hWnd, &v_frames);
		v_decoder.func_init(url, &v_frames);
	}
	void func_push_server_data(uint8_t* data, int len)
	{
		v_decoder.push(data, len);
	}
	//input started in server mode
	void func_init_server(const void* hWnd, const char* url, int codecid,uint8_t* extradata, int extradatalen)
	{
		v_url = url;
		v_drawer.func_init(hWnd, &v_frames);
		v_decoder.func_init(url, &v_frames,codecid, extradata, extradatalen);
	}
	void func_start()
	{
		v_decoder.Start();
	}
	void func_stop()
	{
		v_decoder.func_stop();
	}

	void func_record_start(const char* filename)
	{
		v_decoder.func_record_start(filename);
	}
	void func_record_close()
	{
		v_decoder.func_record_close();
	}
	//insert a function into the processing chain
	void func_add(func_process process)
	{
		Lock lock(this);
		v_funcs.push_back(process);
	}
	void func_dec(const char* name)
	{
		Lock lock(this);
		auto iter = v_funcs.begin();
		while (iter != v_funcs.end())
		{
			if ((*iter).funcname.compare(name) == 0)
			{
				v_funcs.erase(iter);
				break;
			}
			iter++;
		}
	}

	//

	AVFrame* v_cache = NULL;
	int v_need_cache = 0;
	AVFrame* func_getrgb(AVFrame** frame)
	{
		*frame = NULL;
		std::lock_guard<std::mutex> lock(v_mutex_cache);
		if (v_cache == NULL)
			return v_cache;
		else
		{
			*frame = v_cache;
			v_cache = NULL;
		}
		return *frame;
	}

	int func_get_decode_width()
	{
		return v_decoder.func_get_decode_width();
	}
	int func_get_decode_height()
	{
		return v_decoder.func_get_decode_height();
	}
	//
	std::list<PlugIn_ptr> v_plugins;
	std::mutex _plugin_;
	void plugin_add(std::string name)
	{
		PlugIn_ptr in = c_plugins::instance()->Get(name);
		int find = 0;
		std::lock_guard<std::mutex> lock(_plugin_);
		auto iter = v_plugins.begin();
		while (iter != v_plugins.end())
		{
			if ((*iter)->name.compare(in->name) == 0)
			{
				find = 1;
				break;
			}
			iter++;
		}
		if (find == 0)
		{
			v_plugins.push_back(in);
		}
	}
	//remove a plug-in from the chain
	void plugin_delete(std::string name)
	{
		std::lock_guard<std::mutex> lock(_plugin_);
		auto iter = v_plugins.begin();
		while (iter != v_plugins.end())
		{
			//PlugIn* tmp = *iter;
			if ((*iter)->name.compare(name) == 0)
			{
				v_plugins.erase(iter);
				break;
			}
			iter++;
		}
	}
#ifdef CSTRING
	void GetPlugInName(vector<CString>& names)
	{
		USES_CONVERSION;
		std::lock_guard<std::mutex> lock(_plugin_);
		auto iter = v_plugins.begin();
		while (iter != v_plugins.end())
		{
			CString str = A2W((*iter)->name.c_str());
			//CString str = (*iter)->name.c_str();
			names.emplace_back(str);
			iter++;
		}
	}
#endif
	///

	//draw one frame
	void func_draw(int px, int py, int drawrect)
	{
		AVFrame* frame = NULL;
		if (v_frames.dequeue(&frame))
		{

			/*cv::Mat nmat;
			nmat.cols = frame->width;
			nmat.rows = frame->height;
			nmat.data = frame->data[0];*/
			//run the loaded plug-ins (e.g. distortion correction)
			//if (!v_funcs.empty())
			
			auto iter = v_plugins.begin();
			while (iter != v_plugins.end())
			{
				(*iter)->FUNC_worker(frame->data[0], frame->width, frame->height,NULL);
				iter++;
			}
			
			
			v_drawer.func_draw(px, py, frame, drawrect);
			{
				//interact with the python script
				//if (v_script != nullptr && !v_script->IsStop())
				//{
				//	if (frame->key_frame)
				//	{
				//		int w = frame->width;
				//		int h = frame->height;
				//		//write the frame data into shared memory
				//		mem_info_ptr ptr = v_script->mem_getch_process(frame->data[0], w, h);
				//		if (ptr != nullptr)
				//		{
				//			v_script->v_in.Push(ptr);
				//			v_script->Notify();//notify the script thread to consume the queue
				//		}
				//	}
				//	if (!v_script->v_out.IsEmpty())
				//	{
				//		mem_info_ptr ptr = v_script->v_out.FrontAndPop();
				//		if (ptr != nullptr)
				//		{
				//			//draw the sub-picture with the returned results

				//			//return the memory block
				//			v_script->mem_return(ptr);
				//		}
				//	}

				//}
			
				std::lock_guard<std::mutex> lock(v_mutex_cache);
				if (v_cache != NULL)
				{
					av_freep(&v_cache->data[0]);
					av_frame_free(&v_cache);
				}
				//cache the most recent frame
				v_cache = frame;
				//whether interaction with the python script process is needed
			}
		}
	}


	//also shrinks the window to the picture size and crops the black borders
	void func_cut(int windoww, int windowy, int imagew,int imageh)
	{
		v_drawer.func_cut(windoww, windowy,imagew, imageh);
	}
	void func_restore()
	{
		v_drawer.func_restore();
	}


//	std::map<string,Script*>
	script_ptr v_script = nullptr;
	std::mutex _script_;
	void func_script_add(std::string path)
	{
		//_script_.lock();
		if(v_script==nullptr)
		{
			v_script = std::make_shared<s_script>();
			int w = v_decoder.func_get_decode_width();
			int h = v_decoder.func_get_decode_height();
			//the shared memory is split into 5 blocks, each w*h*3 bytes (bgr24)
			int blocksize = 5;
			v_script->func_init(w, h, blocksize, v_url.c_str(), path.c_str());
			//v_script->add(path);
			v_script->Start();
			//processRet("python.exe", path.c_str());
		}
	}
	
	void func_script_del(std::string path)
	{
		if (v_script != nullptr)
			v_script->Stop();
	}

};
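
A minimal usage sketch of the class above, assuming an existing native window handle hwnd; the RTSP url and the script path are illustrative, and the call order follows the member functions shown:

c_av_func av;
av.func_init(hwnd, "rtsp://192.168.1.10/stream");   //client-mode input (url is illustrative)
av.func_start();                                    //starts the decoder thread

//in the render loop: draw one frame, no shift, no selection rectangle
av.func_draw(0, 0, 0);

//set up the python script side (shared memory + TCP handshake)
av.func_script_add("detect.py");

av.func_script_del("detect.py");
av.func_stop();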


三、The c_script definition

#include <string>
#include <vector>
#include <algorithm>
#include <future>
#include <chrono>
#include "c_string.h"
#include "tcpclient.h"
#include "memshare.h"
#include "TThreadRunable.h"
typedef struct s_script:public TThreadRunable
{
	s_script()
	{
	}
	~s_script()
	{
		v_container.clear();
	}
	//script file path
	std::string v_path;
	std::string v_sharename;
	std::string v_url;
	int v_totalblock = 0;
	int v_w = 0;
	int v_h = 0;
	std::list<mem_info_ptr> v_container;
	//input queue (blocks waiting to be processed by python)
	flist<mem_info_ptr> v_in;

	//output queue (blocks python has finished with)
	flist<mem_info_ptr> v_out;

	cmem_map v_map;
	std::mutex _mr;
	
	tcpclient v_tcpclient;


	mem_info_ptr func_fetch()
	{
		std::lock_guard<std::mutex> lock(_mr);
		if (v_container.size() > 0)
		{
			mem_info_ptr data = v_container.front();
			//block = v_container.front().block;
			v_container.pop_front();
			return data;
		}
		return nullptr;
	}




	void func_init(int w, int h,int block,const char* url,const char* path)
	{
		v_w = w;
		v_h = h;
		v_url = url;
		v_path = path;
		//number of actual shared-memory blocks
		v_totalblock = block;
	
	}
	void Stop()
	{
		TThreadRunable::Stop();
		v_tcpclient.Stop();
	}

	mem_info_ptr  mem_getch_process(uint8_t* data ,int w, int h)
	{
		mem_info_ptr mem = nullptr;
		{
			std::lock_guard<std::mutex> lock(_mr);
			if (v_container.size() > 0)
			{
				mem = v_container.front();
				//block = v_container.front().block;
				v_container.pop_front();
			}
			else
			{
				return nullptr;
			}
		}
		v_map.Write(mem->block, data, w, h);
		//memcpy(mem->v_mem, data, w*h * 3);
		return mem;
	}
	
	//return a memory block to the pool
	void mem_return(mem_info_ptr ptr)
	{
		//mem_info_ptr ptr = std::make_shared<s_mem_info>();
		//ptr->v_mem = data;
		//ptr->block = block;
		std::lock_guard<std::mutex> lock(_mr);
		v_container.emplace_back(ptr);
	}

	int func_process()
	{
		std::string p = "python.exe ";
		p += v_path;
		std::system(p.c_str());
		return 0;
	}


	void Run()
	{

		_canstart = false;
		const char* sharename = getfilename(v_path.c_str());
		//0 or 1
		if (v_map.Open(v_w, v_h, v_totalblock , sharename) >= 0)
		{
			//std::lock_guard<std::mutex> lock(_mr);
			for (int i = 0; i < v_totalblock; i++)
			{
				mem_info_ptr meminfo = std::make_shared<s_mem_info>();
				meminfo->v_mem = v_map.GetBlock(i);
				meminfo->block = i;
				v_container.push_back(meminfo);
			}
		}
		int connected = -1;
		int connectime = 0;
		while (connected == -1)
		{
			connected = v_tcpclient.Connect();
			if (connected == -1)
			{
				std::this_thread::sleep_for(std::chrono::milliseconds(200));
				//std::this_thread::sleep_for(std::chrono::mill (1000));
			}
			else
				break;
			if (++connectime == 10)
				break;
		}
		if (connected != 1 || !v_tcpclient.ready())//handshake: send "ready", expect "READY" back
		{
			return;
		}
		//tell the python server the image size and how many shared-memory blocks there are
		if (connected && v_tcpclient.writeInfo(v_w, v_h, v_totalblock,
			sharename, (int)strlen(sharename)) == -1)
			return;

		//auto fut = std::async(std::launch::async,&s_script::func_process,this);
		while (connected)
		{
			//note: a dedicated port should be allocated here
			//send and receive data
			if (IsStop())
				break;
    		//if (fut.wait_for(5ms) == std::future_status::ready)
			//	break;
			this->WaitForSignal();
			//an image is ready: tell python which block it is in
			mem_info_ptr info = v_in.FrontAndPop();
			if (info == nullptr)
				continue;
			if (v_tcpclient.write_signal(info->block) == -1)
				break;

			if (v_tcpclient.ready2())
			{//the "ready" reply means python has finished processing the block
				v_out.Push(info);
			}
			else
				break;
		}
		v_in.clear();
		v_out.clear();
		_stop = true;
		_canstart = true;
		v_tcpclient.Stop();
	}


}s_script;

四、The cmem_map class

Memory is shared with Python through a named file mapping.

class cmem_map
{
	HANDLE hFile = NULL;
	OFSTRUCT opBuf;
	HANDLE hMapfile = NULL;
	HANDLE hMapview = NULL;
	//long offset = 0;
public:
	cmem_map() {}
	~cmem_map() {}

	bool IsOpen()
	{
		return (hMapview != NULL);
	}
	bool IsClosed()
	{
		return (hMapfile == NULL);
	}
	int Open(int w,int h,int block, const char* shareName)
	{
		//hFile = ::CreateFile(memfile_add, GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
		//if (hFile == 0)
		//{
		//	printf("open file failed!\n");
		//	return -1;
		//}
		v_w = w;
		v_h = h;
		if (hMapfile != NULL)
			return 1;
		//v_mem_block_number = blocknum;
		//total size: block image buffers of w*h*3 bytes each
		int fsize = w * h * 3 * block;

		//create a named, pagefile-backed file mapping
		hMapfile = CreateFileMappingA(INVALID_HANDLE_VALUE, NULL/*&ScrtyAttr*/, PAGE_READWRITE | SEC_COMMIT, 0, fsize, shareName);

		//hMapfile = CreateFileMapping((HANDLE)hFile, NULL, PAGE_READWRITE, 0, fsize, L"MapTest");
		if (hMapfile == NULL)
		{
			printf("mapping file failed!\n");
			return -1;
		}
		//close the file handle
		//CloseHandle((HANDLE)hFile);
		//hFile = 0;
		hMapview = MapViewOfFile(hMapfile, FILE_MAP_WRITE, 0, 0, 0);
		if (hMapview == NULL)
		{
			printf("mapping view failed!\n");
			return -1;
		}
		return 0;
	}
	void Close()
	{
		UnmapViewOfFile(hMapview);
		hMapview = NULL;
		//close the file-mapping handle
		CloseHandle(hMapfile);
		hMapfile = 0;
	}
	int Write(int block, uint8_t* data, int w,int h)
	{
		uint8_t* recv = (uint8_t*)hMapview;
		recv += block*w*h*3;
		memcpy(recv, data, w*h*3);
		//flush the modified contents of the mapped view back to the backing store
		FlushViewOfFile(recv,w*h*3);
		return 0;
	}

	//int v_mem_block_number = 0;
	int v_w = 0;
	int v_h = 0;
	//int v_now_mem_point = -1;
	//uint8_t* v_read = NULL;
	//uint8_t* v_write = NULL;
	//uint8_t* GetFirst()
	//{
	//	v_now_mem_point=0;
	//	uint8_t *recv = (uint8_t *)hMapview;
	//	return recv;
	//}
	uint8_t* GetBlock(int block)
	{
		//v_now_mem_point++;
		//if (v_now_mem_point == v_mem_block_number)
		//	v_now_mem_point = 0;
		uint8_t *recv = (uint8_t *)hMapview;
		recv += v_w * v_h * 3 * block;
		return recv;
	}
};


typedef struct s_mem_info
{
	uint8_t* v_mem = NULL;
	int block = 0;
}s_mem_info;
typedef std::shared_ptr<s_mem_info> mem_info_ptr;
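
A minimal sketch of the writer side of cmem_map, assuming a 1920x1080 BGR24 frame and the 5-block layout used by func_script_add; the share name "detect" and the frame buffer are illustrative:

#include <vector>
#include <cstdint>

int demo_write_one_frame()
{
	std::vector<uint8_t> frame(1920 * 1080 * 3);   // one BGR24 frame, filled by the decoder
	cmem_map map;
	if (map.Open(1920, 1080, 5, "detect") < 0)     // 5 blocks of w*h*3 bytes
		return -1;
	map.Write(0, frame.data(), 1920, 1080);        // copy the frame into block 0
	uint8_t* block0 = map.GetBlock(0);             // python maps the same name and reads this offset
	(void)block0;
	map.Close();
	return 0;
}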

五、Communicating with Python

A TCP socket is used for the signalling between the two processes.

#define ASIO_STANDALONE
#include "asio.hpp"
#include "asio/buffer.hpp"
#include "asio/io_context.hpp"
#include "asio/ip/tcp.hpp"
#include "asio/read_until.hpp"
#include "asio/steady_timer.hpp"
#include "asio/write.hpp"
#include <functional>
#include <iostream>
#include <string>

using asio::steady_timer;
using asio::ip::tcp;
using std::placeholders::_1;
using std::placeholders::_2;
class tcpclient
{
	asio::io_context ioc;
	asio::ip::tcp::resolver resolver;// (ioc);
	asio::ip::tcp::socket socket;// (ioc);
public:
	tcpclient():resolver(ioc),socket(ioc)
	{}
	~tcpclient()
	{
		Stop();
	}
public:

	int Connect()
	{
		try
		{
			asio::ip::tcp::endpoint ep(asio::ip::address::from_string("127.0.0.1"), 11000);
			socket.connect(ep);
			if (!socket.is_open())
			{
				//AfxMessageBox(L"failed to connect to the server");
				//std::cout << "failed to connect to the server" << std::endl;
				return -1;
			}
			return 1;
		}
		catch (std::exception& e)
		{
			std::cerr << "Exception: " << e.what() << "\n";
			return -1;
		}
	}

	void Stop()
	{
		if (socket.is_open())
			socket.close();
	}
	//first handshake: send "ready" and expect "READY" back
	bool ready()
	{
		try
		{
			asio::write(socket, asio::buffer("ready", 5));
			char buffer[6];
			int len = 5;
			size_t rl = asio::read(socket, asio::buffer(buffer,len));
			buffer[5] = '\0';
			const char* src = &buffer[0];
			const char* dst = "READY";
			if (strcmp(src, dst) == 0)
			{
				return true;
			}
			return false;
		}
		catch (std::exception& e)
		{
			std::cerr << "Exception: " << e.what() << "\n";
			return false;
		}
	}

	//per-frame handshake: read "ready" back from python after a block index has been sent
	bool ready2()
	{
		try
		{
			char buffer[6];
			int len = 5;
			size_t rl = asio::read(socket, asio::buffer(buffer, len));
			buffer[5] = '\0';
			const char* src = &buffer[0];
			const char* dst = "ready";
			if (strcmp(src, dst) == 0)
			{
				return true;
			}
			return false;
		}
		catch (std::exception& e)
		{
			std::cerr << "Exception: " << e.what() << "\n";
			return false;
		}
	}
	//send the basic info: image resolution, number of memory blocks, and the shared-memory name
	size_t writeInfo(uint16_t width,uint16_t height,uint16_t block,const char* data, int len)
	{
		try
		{
			char buffer[256];
			int totallen = 2 * 3 + len;
			if (totallen + 4 > 256)
				return -1;
			char* pos = &buffer[0];
			char* head = pos;
			uint32_t* tmp = (uint32_t*)pos;
			*tmp = totallen;
			pos += 4;
			memcpy(pos, &width, 2);
			pos += 2;
			memcpy(pos, &height, 2);
			pos += 2;
			memcpy(pos, &block, 2);
			pos += 2;
			memcpy(pos, data, len);
			//10 is 4+2+2+2
			return asio::write(socket, asio::buffer(head, 10 + len));
		}
		catch (std::exception& e)
		{
			std::cerr << "Exception: " << e.what() << "\n";
			return -1;
		}
	}
	//write a two-byte signal; block tells python which memory block holds the frame
	size_t write_signal(uint16_t block)
	{
		try
		{
			uint8_t* pos = (uint8_t*)&block;
			return asio::write(socket, asio::buffer(pos, 2));
		}
		catch (std::exception& e)
		{
			std::cerr << "Exception: " << e.what() << "\n";
			return -1;
		}
	}
};
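
For reference, the call order the s_script thread above uses with this class; this is only a sketch with illustrative values and no error handling:

tcpclient client;
if (client.Connect() == 1 && client.ready())           //handshake: "ready" / "READY"
{
	//resolution, block count and share name (values are illustrative)
	client.writeInfo(1920, 1080, 5, "detect", (int)strlen("detect"));
	client.write_signal(0);                            //a new frame is in block 0
	if (client.ready2())                               //python replies "ready" when done
	{
		//block 0 can be reused
	}
	client.Stop();
}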

六、Python shared-memory side

import socket
import threading
import cv2
import numpy as np
#from PIL import Image
import mmap
import contextlib
import time
import mtcnn
from mtcnn import MTCNN
print(mtcnn.__version__)


def recvsize(clientsock, addr, num):
    # read exactly num bytes from the socket
    had_received = 0
    data_body = bytes()
    while had_received < num:
        part = clientsock.recv(num - had_received)
        if not part:
            break
        data_body += part
        had_received += len(part)
    return data_body


def recv(clientsock, addr, detector):
    # first handshake: C++ sends "ready", we reply "READY"
    recvsize(clientsock, addr, 5)
    clientsock.send(b"READY")
    data = recvsize(clientsock, addr, 4)   # 4-byte length of the info packet
    totalsize = (data[3] << 24) + (data[2] << 16) + (data[1] << 8) + data[0]
    data = recvsize(clientsock, addr, 2)
    width = (data[1] << 8) + data[0]
    data = recvsize(clientsock, addr, 2)
    height = (data[1] << 8) + data[0]
    data = recvsize(clientsock, addr, 2)
    blocktotal = (data[1] << 8) + data[0]
    name = recvsize(clientsock, addr, totalsize - 6).decode()   # shared-memory name
    memsize = width * height * 3 * blocktotal   # must match the size allocated in cmem_map::Open

    with contextlib.closing(mmap.mmap(-1, memsize, tagname=name, access=mmap.ACCESS_WRITE)) as m:
        while True:
            data = recvsize(clientsock, addr, 2)   # 2-byte block index
            block = (data[1] << 8) + data[0]
            m.seek(width * height * 3 * block)
            data_body = m.read(width * height * 3)   # one bgr24 frame
            frombuf = np.frombuffer(data_body, np.uint8)
            img = np.array(frombuf).reshape((height, width, 3)).astype(np.uint8)
            faces = detector.detect_faces(img)
            for result in faces:
                x, y, bw, bh = result['box']
                leye = result['keypoints']['left_eye']
                cv2.rectangle(img, (x, y), (x + bw, y + bh), (0, 255, 0), 2)
            clientsock.send(b"ready")   # tell the C++ side this block has been processed
            #cv2.imshow("test", img)


def main():
    # prepare the face detector
    detector = MTCNN()
    host = '127.0.0.1'   # host IP
    port = 11000         # port
    web = socket.socket(socket.AF_INET, socket.SOCK_STREAM)   # TCP socket
    web.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
    web.bind((host, port))   # bind the port
    web.listen(5)            # maximum number of pending connections
    print("server waiting for a client connection...")
    while True:   # accept connections forever
        conn, addr = web.accept()   # accept a client connection
        print("addr is", addr)
        t1 = threading.Thread(target=recv, args=(conn, addr, detector))
        t1.start()

if __name__ == '__main__':
    main()
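
Note that in this version the std::async launch of python.exe inside s_script::Run is commented out, so the script above has to be started by hand first. It listens on 127.0.0.1:11000; the C++ tcpclient then connects, sends the info packet with the shared-memory name, and the script maps that name with mmap before entering the per-frame loop.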

