AVFrame: This structure describes decoded (raw) audio or video data. AVFrame must be allocated using av_frame_alloc(). Note that this only allocates the AVFrame itself; the buffers for the data must be managed through other means (see below). AVFrame must be freed with av_frame_free().
typedef struct AVFrame {
#define AV_NUM_DATA_POINTERS 8
/**
* pointer to the picture/channel planes.
* This might be different from the first allocated byte
*
* Some decoders access areas outside 0,0 - width,height, please
* see avcodec_align_dimensions2(). Some filters and swscale can read
* up to 16 bytes beyond the planes, if these filters are to be used,
* then 16 extra bytes must be allocated.
*/
uint8_t *data[AV_NUM_DATA_POINTERS];
/**
* For video, size in bytes of each picture line.
* For audio, size in bytes of each plane.
*
* For audio, only linesize[0] may be set. For planar audio, each channel
* plane must be the same size.
*
* For video the linesizes should be multiples of the CPU's alignment
* preference, this is 16 or 32 for modern desktop CPUs.
* Some code requires such alignment, other code can be slower without
* correct alignment, and for yet other code it makes no difference.
*
* @note The linesize may be larger than the size of usable data -- there
* may be extra padding present for performance reasons.
*/
int linesize[AV_NUM_DATA_POINTERS];
/**
* pointers to the data planes/channels.
*
* For video, this should simply point to data[].
*
* For planar audio, each channel has a separate data pointer, and
* linesize[0] contains the size of each channel buffer.
* For packed audio, there is just one data pointer, and linesize[0]
* contains the total size of the buffer for all channels.
*
* Note: Both data and extended_data should always be set in a valid frame,
* but for planar audio with more channels than can fit in data,
* extended_data must be used in order to access all channels.
*/
uint8_t **extended_data;
/**
* width and height of the video frame
*/
int width, height;
/**
* number of audio samples (per channel) described by this frame
*/
int nb_samples;
/**
* format of the frame, -1 if unknown or unset.
* Values correspond to enum AVPixelFormat for video frames,
* enum AVSampleFormat for audio frames.
*/
int format;
/**
* 1 -> keyframe, 0 -> not
*/
int key_frame;
/**
* Picture type of the frame.
*/
enum AVPictureType pict_type;
#if FF_API_AVFRAME_LAVC
attribute_deprecated
uint8_t *base[AV_NUM_DATA_POINTERS];
#endif
/**
* Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
*/
AVRational sample_aspect_ratio;
/**
* Presentation timestamp in time_base units (time when frame should be shown to user).
*/
int64_t pts;
/**
* PTS copied from the AVPacket that was decoded to produce this frame.
*/
int64_t pkt_pts;
/**
* DTS copied from the AVPacket that triggered returning this frame. (if frame threading isn't used)
* This is also the presentation time of this AVFrame, calculated from
* AVPacket.dts values only, without using pts values.
*/
int64_t pkt_dts;
/**
* picture number in bitstream order
*/
int coded_picture_number;
/**
* picture number in display order
*/
int display_picture_number;
/**
* quality (between 1 (good) and FF_LAMBDA_MAX (bad))
*/
int quality;
#if FF_API_AVFRAME_LAVC
attribute_deprecated
int reference;
/**
* QP table
*/
attribute_deprecated
int8_t *qscale_table;
/**
* QP store stride
*/
attribute_deprecated
int qstride;
attribute_deprecated
int qscale_type;
/**
* mbskip_table[mb]>=1 if MB didn't change
* stride= mb_width = (width+15)>>4
*/
attribute_deprecated
uint8_t *mbskip_table;
/**
* motion vector table
* @code
* example:
* int mv_sample_log2= 4 - motion_subsample_log2;
* int mb_width= (width+15)>>4;
* int mv_stride= (mb_width << mv_sample_log2) + 1;
* motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y];
* @endcode
*/
attribute_deprecated
int16_t (*motion_val[2])[2];
/**
* macroblock type table
* mb_type_base + mb_width + 2
*/
attribute_deprecated
uint32_t *mb_type;
/**
* DCT coefficients
*/
attribute_deprecated
short *dct_coeff;
/**
* motion reference frame index
* the order in which these are stored can depend on the codec.
*/
attribute_deprecated
int8_t *ref_index[2];
#endif
/**
* for some private data of the user
*/
void *opaque;
/**
* error
*/
uint64_t error[AV_NUM_DATA_POINTERS];
#if FF_API_AVFRAME_LAVC
attribute_deprecated
int type;
#endif
/**
* When decoding, this signals how much the picture must be delayed.
* extra_delay = repeat_pict / (2*fps)
*/
int repeat_pict;
/**
* The content of the picture is interlaced.
*/
int interlaced_frame;
/**
* If the content is interlaced, is top field displayed first.
*/
int top_field_first;
/**
* Tell user application that palette has changed from previous frame.
*/
int palette_has_changed;
#if FF_API_AVFRAME_LAVC
attribute_deprecated
int buffer_hints;
/**
* Pan scan.
*/
attribute_deprecated
struct AVPanScan *pan_scan;
#endif
/**
* reordered opaque 64bit (generally an integer or a double precision float
* PTS but can be anything).
* The user sets AVCodecContext.reordered_opaque to represent the input at
* that time,
* the decoder reorders values as needed and sets AVFrame.reordered_opaque
* to exactly one of the values provided by the user through AVCodecContext.reordered_opaque
* @deprecated in favor of pkt_pts
*/
int64_t reordered_opaque;
#if FF_API_AVFRAME_LAVC
/**
* @deprecated this field is unused
*/
attribute_deprecated void *hwaccel_picture_private;
attribute_deprecated
struct AVCodecContext *owner;
attribute_deprecated
void *thread_opaque;
/**
* log2 of the size of the block which a single vector in motion_val represents:
* (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2)
*/
uint8_t motion_subsample_log2;
#endif
/**
* Sample rate of the audio data.
*/
int sample_rate;
/**
* Channel layout of the audio data.
*/
uint64_t channel_layout;
/**
* AVBuffer references backing the data for this frame. If all elements of
* this array are NULL, then this frame is not reference counted.
*
* There may be at most one AVBuffer per data plane, so for video this array
* always contains all the references. For planar audio with more than
* AV_NUM_DATA_POINTERS channels, there may be more buffers than can fit in
* this array. Then the extra AVBufferRef pointers are stored in the
* extended_buf array.
*/
AVBufferRef *buf[AV_NUM_DATA_POINTERS];
/**
* For planar audio which requires more than AV_NUM_DATA_POINTERS
* AVBufferRef pointers, this array will hold all the references which
* cannot fit into AVFrame.buf.
*
* Note that this is different from AVFrame.extended_data, which always
* contains all the pointers. This array only contains the extra pointers,
* which cannot fit into AVFrame.buf.
*
* This array is always allocated using av_malloc() by whoever constructs
* the frame. It is freed in av_frame_unref().
*/
AVBufferRef **extended_buf;
/**
* Number of elements in extended_buf.
*/
int nb_extended_buf;
AVFrameSideData **side_data;
int nb_side_data;
/**
* @defgroup lavu_frame_flags AV_FRAME_FLAGS
* Flags describing additional frame properties.
*
* @{
*/
/**
* The frame data may be corrupted, e.g. due to decoding errors.
*/
#define AV_FRAME_FLAG_CORRUPT (1 << 0)
/**
* @}
*/
/**
* Frame flags, a combination of @ref lavu_frame_flags
*/
int flags;
#if FF_API_AVFRAME_COLORSPACE
/**
* MPEG vs JPEG YUV range.
* It must be accessed using av_frame_get_color_range() and
* av_frame_set_color_range().
* - encoding: Set by user
* - decoding: Set by libavcodec
*/
enum AVColorRange color_range;
enum AVColorPrimaries color_primaries;
enum AVColorTransferCharacteristic color_trc;
/**
* YUV colorspace type.
* It must be accessed using av_frame_get_colorspace() and
* av_frame_set_colorspace().
* - encoding: Set by user
* - decoding: Set by libavcodec
*/
enum AVColorSpace colorspace;
enum AVChromaLocation chroma_location;
#endif
/**
* frame timestamp estimated using various heuristics, in stream time base
* Code outside libavcodec should access this field using:
* av_frame_get_best_effort_timestamp(frame)
* - encoding: unused
* - decoding: set by libavcodec, read by user.
*/
int64_t best_effort_timestamp;
/**
* reordered pos from the last AVPacket that has been input into the decoder
* Code outside libavcodec should access this field using:
* av_frame_get_pkt_pos(frame)
* - encoding: unused
* - decoding: Read by user.
*/
int64_t pkt_pos;
/**
* duration of the corresponding packet, expressed in
* AVStream->time_base units, 0 if unknown.
* Code outside libavcodec should access this field using:
* av_frame_get_pkt_duration(frame)
* - encoding: unused
* - decoding: Read by user.
*/
int64_t pkt_duration;
/**
* metadata.
* Code outside libavcodec should access this field using:
* av_frame_get_metadata(frame)
* - encoding: Set by user.
* - decoding: Set by libavcodec.
*/
AVDictionary *metadata;
/**
* decode error flags of the frame, set to a combination of
* FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there
* were errors during the decoding.
* Code outside libavcodec should access this field using:
* av_frame_get_decode_error_flags(frame)
* - encoding: unused
* - decoding: set by libavcodec, read by user.
*/
int decode_error_flags;
#define FF_DECODE_ERROR_INVALID_BITSTREAM 1
#define FF_DECODE_ERROR_MISSING_REFERENCE 2
/**
* number of audio channels, only used for audio.
* Code outside libavcodec should access this field using:
* av_frame_get_channels(frame)
* - encoding: unused
* - decoding: Read by user.
*/
int channels;
/**
* size of the corresponding packet containing the compressed
* frame. It must be accessed using av_frame_get_pkt_size() and
* av_frame_set_pkt_size().
* It is set to a negative value if unknown.
* - encoding: unused
* - decoding: set by libavcodec, read by user.
*/
int pkt_size;
/**
* Not to be accessed directly from outside libavutil
*/
AVBufferRef *qp_table_buf;
} AVFrame;
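As the note at the top says, av_frame_alloc() only allocates the AVFrame itself. Below is a minimal lifecycle sketch for a reference-counted video frame, assuming a libavutil version that provides av_frame_get_buffer() (the 1280x720/YUV420P values are just placeholders):
AVFrame *frame = av_frame_alloc();
frame->format = AV_PIX_FMT_YUV420P;
frame->width  = 1280;
frame->height = 720;
/* Allocates the data[]/buf[] planes; 32 is a common alignment, 0 picks a default. */
if (av_frame_get_buffer(frame, 32) < 0) {
    /* allocation failed */
}
/* ... fill and use frame->data / frame->linesize ... */
av_frame_free(&frame); /* unreferences the buffers and frees the AVFrame itself */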
AVFrame describes decoded (raw, uncompressed) data: YUV or RGB for video, PCM for audio, together with related metadata. During decoding it also carries analysis data such as the macroblock type table, the QP table, and the motion vector table, and during encoding it stores similar data, which makes this structure central when doing bitstream analysis with FFmpeg.
uint8_t *data[AV_NUM_DATA_POINTERS]: the decoded raw data (YUV or RGB for video, PCM for audio).
int linesize[AV_NUM_DATA_POINTERS]: the size in bytes of one "line" of data. Note: this is not necessarily equal to the image width; it is usually larger (see the dump sketch after this list).
int width, height: width and height of the video frame (1920x1080, 1280x720, ...).
int nb_samples: an audio AVFrame may contain multiple audio samples; this field records how many (per channel).
int format: the format of the decoded raw data (YUV420P, YUV422P, RGB24, ...).
int key_frame: whether this is a key frame.
enum AVPictureType pict_type: picture type (I, B, P, ...).
AVRational sample_aspect_ratio: aspect ratio (16:9, 4:3, ...).
int64_t pts: presentation timestamp.
int coded_picture_number: picture number in coded (bitstream) order.
int display_picture_number: picture number in display order.
int8_t *qscale_table: the QP table.
uint8_t *mbskip_table: the macroblock skip table.
int16_t (*motion_val[2])[2]: the motion vector table.
uint32_t *mb_type: the macroblock type table.
short *dct_coeff: DCT coefficients (not something the author has extracted).
int8_t *ref_index[2]: motion estimation reference frame lists (multiple reference frames seem to apply only to newer standards such as H.264).
int interlaced_frame: whether the content is interlaced.
uint8_t motion_subsample_log2: the motion vector sampling density within a macroblock, stored as a log2 value.
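As a concrete illustration of the data[]/linesize[] relationship above, here is a minimal sketch that dumps a decoded YUV420P frame to a file, copying row by row because linesize[] may be larger than the visible width (hypothetical names; assumes a decoded AVFrame pFrame and an open FILE *fp):
/* Y plane: full resolution */
for (int y = 0; y < pFrame->height; y++)
    fwrite(pFrame->data[0] + y * pFrame->linesize[0], 1, pFrame->width, fp);
/* U and V planes: halved in both directions for YUV420P */
for (int y = 0; y < pFrame->height / 2; y++)
    fwrite(pFrame->data[1] + y * pFrame->linesize[1], 1, pFrame->width / 2, fp);
for (int y = 0; y < pFrame->height / 2; y++)
    fwrite(pFrame->data[2] + y * pFrame->linesize[2], 1, pFrame->width / 2, fp);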
1. data[]
data[] holds the decoded picture planes (or audio channel planes), as described above.
2. pict_type
pict_type takes one of the following values:
enum AVPictureType {
AV_PICTURE_TYPE_NONE = 0, ///< Undefined
AV_PICTURE_TYPE_I, ///< Intra
AV_PICTURE_TYPE_P, ///< Predicted
AV_PICTURE_TYPE_B, ///< Bi-dir predicted
AV_PICTURE_TYPE_S, ///< S(GMC)-VOP MPEG4
AV_PICTURE_TYPE_SI, ///< Switching Intra
AV_PICTURE_TYPE_SP, ///< Switching Predicted
AV_PICTURE_TYPE_BI, ///< BI type
};
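libavutil also provides av_get_picture_type_char() to map these values to the usual one-letter names; a minimal sketch (assumes a decoded AVFrame pFrame):
char c = av_get_picture_type_char(pFrame->pict_type); /* 'I', 'P', 'B', 'S', ... */
printf("frame type: %c\n", c);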
3. sample_aspect_ratio
The aspect ratio is a fraction; FFmpeg expresses fractions with AVRational:
/**
* rational number numerator/denominator
*/
typedef struct AVRational{
int num; ///< numerator
int den; ///< denominator
} AVRational;
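To use an AVRational numerically, av_q2d() from libavutil converts it to a double; a minimal sketch (assumes a decoded AVFrame pFrame):
if (pFrame->sample_aspect_ratio.num) /* 0/1 means unknown/unspecified */
    printf("sample aspect ratio: %f\n", av_q2d(pFrame->sample_aspect_ratio));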
4. qscale_table
The QP table points to a buffer that stores the QP value of each macroblock. Macroblocks are numbered from left to right, row by row, with one QP per macroblock.
qscale_table[0] is the QP of the macroblock in row 1, column 1; qscale_table[1] is row 1, column 2; qscale_table[2] is row 1, column 3; and so on.
The number of macroblocks is computed as follows (note that a macroblock is 16x16 pixels):
Macroblocks per row: int mb_stride = pCodecCtx->width/16+1
Total macroblocks: int mb_sum = ((pCodecCtx->height+15)>>4)*(pCodecCtx->width/16+1)
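A minimal sketch of walking the QP table with the formulas above (hypothetical names; assumes a decoded AVFrame pFrame whose qscale_table was filled by the decoder, and its AVCodecContext pCodecCtx):
int mb_stride = pCodecCtx->width / 16 + 1;
int mb_height = (pCodecCtx->height + 15) >> 4;
for (int my = 0; my < mb_height; my++) {
    for (int mx = 0; mx < pCodecCtx->width / 16; mx++)
        printf("%3d ", pFrame->qscale_table[my * mb_stride + mx]); /* QP of MB (mx, my) */
    printf("\n");
}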
5. motion_subsample_log2
The size of the picture area (expressed as width or height, in pixels) that a single motion vector represents; note that the value stored is the log2 of that size.
The code comment gives the following mapping:
4->16x16, 3->8x8, 2->4x4, 1->2x2
That is, the field is 4 when one motion vector covers a 16x16 area, 3 when it covers an 8x8 area, and so on.
6. motion_val
The motion vector table stores all the motion vectors of one video frame.
It is stored in a rather unusual way: int16_t (*motion_val[2])[2];
The code comment gives the following addressing example:
int mv_sample_log2= 4 - motion_subsample_log2;
int mb_width= (width+15)>>4;
int mv_stride= (mb_width << mv_sample_log2) + 1;
motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y];
To unpack this:
1. There are two lists, L0 and L1.
2. Each list (L0 or L1) stores a series of MVs, each covering a picture area whose size is determined by motion_subsample_log2.
3. Each MV has a horizontal and a vertical component (x, y).
Note that in FFmpeg the storage layouts of MVs and macroblocks are not directly linked: the first MV belongs to the top-left picture area (whose size depends on motion_subsample_log2), the second MV to the area in row 1, column 2, and so on. The motion vectors inside one 16x16 macroblock may therefore be laid out as in the diagram below (line is the number of motion vectors per row):
// Example: relation between the motion vectors of an 8x8 partition and the macroblock:
//-------------------------
//| | |
//|mv[x] |mv[x+1] |
//-------------------------
//| | |
//|mv[x+line]|mv[x+line+1]|
//-------------------------
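A minimal sketch of reading the L0 motion vectors with the indexing scheme above (hypothetical names; assumes pFrame->motion_val[0] is non-NULL and motion_subsample_log2 is set, which only some decoders guarantee):
int mv_sample_log2 = 4 - pFrame->motion_subsample_log2;
int mb_width  = (pCodecCtx->width + 15) >> 4;
int mv_stride = (mb_width << mv_sample_log2) + 1;
int mv_height = ((pCodecCtx->height + 15) >> 4) << mv_sample_log2;
for (int y = 0; y < mv_height; y++) {
    for (int x = 0; x < (mb_width << mv_sample_log2); x++) {
        int mv_x = pFrame->motion_val[0][x + y * mv_stride][0]; /* horizontal component */
        int mv_y = pFrame->motion_val[0][x + y * mv_stride][1]; /* vertical component */
        printf("(%d,%d) ", mv_x, mv_y);
    }
    printf("\n");
}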
7. mb_type
The macroblock type table stores the type of every macroblock in a frame. Its layout is much like that of the QP table, except that each entry is a uint32 rather than a uint8; there is one type variable per macroblock.
The macroblock types are defined as follows:
//The following defines may change, don't expect compatibility if you use them.
#define MB_TYPE_INTRA4x4 0x0001
#define MB_TYPE_INTRA16x16 0x0002 //FIXME H.264-specific
#define MB_TYPE_INTRA_PCM 0x0004 //FIXME H.264-specific
#define MB_TYPE_16x16 0x0008
#define MB_TYPE_16x8 0x0010
#define MB_TYPE_8x16 0x0020
#define MB_TYPE_8x8 0x0040
#define MB_TYPE_INTERLACED 0x0080
#define MB_TYPE_DIRECT2 0x0100 //FIXME
#define MB_TYPE_ACPRED 0x0200
#define MB_TYPE_GMC 0x0400
#define MB_TYPE_SKIP 0x0800
#define MB_TYPE_P0L0 0x1000
#define MB_TYPE_P1L0 0x2000
#define MB_TYPE_P0L1 0x4000
#define MB_TYPE_P1L1 0x8000
#define MB_TYPE_L0 (MB_TYPE_P0L0 | MB_TYPE_P1L0)
#define MB_TYPE_L1 (MB_TYPE_P0L1 | MB_TYPE_P1L1)
#define MB_TYPE_L0L1 (MB_TYPE_L0 | MB_TYPE_L1)
#define MB_TYPE_QUANT 0x00010000
#define MB_TYPE_CBP 0x00020000
//Note bits 24-31 are reserved for codec specific use (h264 ref0, mpeg1 0mv, ...)
If a macroblock matches one or more of the types defined above, the corresponding bits in its type variable are set to 1.
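A minimal sketch of testing those bits (hypothetical names; assumes pFrame->mb_type was filled by the decoder and mb_stride is computed as in section 4):
uint32_t type = pFrame->mb_type[my * mb_stride + mx];
if (type & MB_TYPE_INTRA4x4)
    printf("intra 4x4 MB\n");
if (type & MB_TYPE_SKIP)
    printf("skipped MB\n");
if ((type & MB_TYPE_L0L1) == MB_TYPE_L0L1)
    printf("predicted from both reference lists\n");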
FFmpeg uses AVPacket to hold media data after demuxing and before decoding (one audio/video frame, one subtitle packet, etc.) together with side information (decoding timestamp, presentation timestamp, duration, etc.).
The AVPacket structure itself is only a container; its data member points to the actual data buffer. That buffer can be created with av_new_packet, copied with av_dup_packet, or produced by an FFmpeg API (such as av_read_frame); after use it must be released by calling av_free_packet.
av_free_packet invokes the packet's own destruct function, which takes one of two values: (1) av_destruct_packet_nofree (or 0), which merely clears data and size; (2) av_destruct_packet, which actually frees the buffer. FFmpeg internally builds buffers with AVPacket and supplies the destruct function accordingly: if FFmpeg intends to keep managing the buffer itself, destruct is set to av_destruct_packet_nofree, so a user call to av_free_packet cannot release it; if FFmpeg will not use the buffer again, destruct is set to av_destruct_packet, meaning the buffer may be freed. For an AVPacket whose buffer cannot be freed, the user should call av_dup_packet before use to clone it into an AVPacket whose buffer can be freed, to avoid errors caused by improperly holding on to the buffer. av_dup_packet allocates a new buffer for an AVPacket whose destruct pointer is av_destruct_packet_nofree, copies the data from the old buffer into the new one, points data at the new buffer, and sets destruct to av_destruct_packet.
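A minimal sketch of the clone-before-use pattern described above, using the deprecated destruct-era API that this text discusses (assumes an opened AVFormatContext pFormatCtx):
AVPacket pkt;
while (av_read_frame(pFormatCtx, &pkt) >= 0) {
    /* The demuxer may hand out a buffer it still owns (destruct ==
     * av_destruct_packet_nofree); clone so our copy owns a freeable buffer. */
    av_dup_packet(&pkt);
    /* ... use pkt.data / pkt.size ... */
    av_free_packet(&pkt); /* now actually releases the cloned buffer */
}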
Time information
Time information is used to implement multimedia synchronization.
The goal of synchronization is to preserve the inherent temporal relationships between media objects when presenting them. There are two kinds: intra-stream synchronization, which maintains the timing within a single media stream so that perception requirements are met (e.g. playing video at the specified frame rate), and inter-stream synchronization, which maintains the timing between different media streams, such as between audio and video (lip sync).
For fixed-rate media, such as constant-frame-rate video or constant-bit-rate audio, the time information (frame rate or bit rate) can be placed in the file header, as in AVI's hdrl LIST or MP4's moov box. A more complex scheme embeds the time information inside the media stream itself, as in MPEG-TS and RealVideo; that scheme can handle variable-rate media and also effectively avoids timing drift during synchronization.
FFmpeg stamps every packet with time labels so that upper-layer applications can implement synchronization more easily. There are two such labels: DTS, the decoding timestamp, and PTS, the presentation timestamp. For audio the two are identical, but for some video coding formats the use of bidirectional prediction makes DTS and PTS differ.
Obtaining time information:
By calling av_find_stream_info, an application can obtain a media file's time information from the AVFormatContext object: mainly the total duration and the start time, plus the related bit rate and file size. The unit of these time fields is AV_TIME_BASE, i.e. microseconds.
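A minimal sketch of reading these fields (assumes an opened AVFormatContext pFormatCtx; avformat_find_stream_info is the current name of av_find_stream_info):
if (avformat_find_stream_info(pFormatCtx, NULL) >= 0) {
    double duration_s = pFormatCtx->duration   / (double)AV_TIME_BASE; /* us -> s */
    double start_s    = pFormatCtx->start_time / (double)AV_TIME_BASE;
    printf("duration: %.3f s, start: %.3f s\n", duration_s, start_s);
}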
/**
* This structure stores compressed data. It is typically exported by demuxers
* and then passed as input to decoders, or received as output from encoders and
* then passed to muxers.
*
* For video, it should typically contain one compressed frame. For audio it may
* contain several compressed frames.
*
* AVPacket is one of the few structs in FFmpeg whose size is a part of the
* public ABI. Thus it may be allocated on the stack, and no new fields can be
* added to it without a libavcodec and libavformat major bump.
*
* The semantics of data ownership depends on the buf or destruct (deprecated)
* fields. If either is set, the packet data is dynamically allocated and is
* valid indefinitely until av_free_packet() is called (which in turn calls
* av_buffer_unref()/the destruct callback to free the data). If neither is set,
* the packet data is typically backed by some static buffer somewhere and is
* only valid for a limited time (e.g. until the next read call when demuxing).
*
* The side data is always allocated with av_malloc() and is freed in
* av_free_packet().
*/
typedef struct AVPacket {
/**
* A reference to the reference-counted buffer where the packet data is
* stored.
* May be NULL, then the packet data is not reference-counted.
*/
AVBufferRef *buf;
/**
* Presentation timestamp in AVStream->time_base units; the time at which
* the decompressed packet will be presented to the user.
* Can be AV_NOPTS_VALUE if it is not stored in the file.
* pts MUST be larger than or equal to dts, as presentation cannot happen before
* decompression, unless one wants to view hex dumps. Some formats misuse
* the terms dts and pts/cts to mean something different. Such timestamps
* must be converted to true pts/dts before they are stored in AVPacket.
*/
int64_t pts;
/**
* Decompression timestamp in AVStream->time_base units; the time at which
* the packet is decompressed.
* Can be AV_NOPTS_VALUE if it is not stored in the file.
*/
int64_t dts;
uint8_t *data;
int size;
int stream_index;
/**
* A combination of AV_PKT_FLAG values
*/
int flags;
/**
* Additional packet data that can be provided by the container.
* Packet can contain several types of side information.
*/
AVPacketSideData *side_data;
int side_data_elems;
/**
* Duration of this packet in AVStream->time_base units, 0 if unknown.
* Equals next_pts - this_pts in presentation order.
*/
int duration;
#if FF_API_DESTRUCT_PACKET
attribute_deprecated
void (*destruct)(struct AVPacket *);
attribute_deprecated
void *priv;
#endif
int64_t pos; ///< byte position in stream, -1 if unknown
/**
* Time difference in AVStream->time_base units from the pts of this
* packet to the point at which the output from the decoder has converged
* independent from the availability of previous frames. That is, the
* frames are virtually identical no matter if decoding started from
* the very first frame or from this keyframe.
* Is AV_NOPTS_VALUE if unknown.
* This field is not the display duration of the current packet.
* This field has no meaning if the packet does not have AV_PKT_FLAG_KEY
* set.
*
* The purpose of this field is to allow seeking in streams that have no
* keyframes in the conventional sense. It corresponds to the
* recovery point SEI in H.264 and match_time_delta in NUT. It is also
* essential for some types of subtitle streams to ensure that all
* subtitles are correctly displayed after seeking.
*/
int64_t convergence_duration;
} AVPacket;
The important fields of AVPacket are the following:
uint8_t *data: the compressed, encoded data.
For H.264, for example, the data of one AVPacket usually corresponds to one NAL unit.
Note: "corresponds to", not "is identical to"; there is a small difference between the two (see the companion article on extracting an H.264 stream from a media file with the FFmpeg libraries).
Because of this, when processing audio/video with FFmpeg, the data of the obtained AVPackets can often be written directly to a file, yielding a raw audio/video bitstream (see the sketch after this list).
int size: the size of data.
int64_t pts: presentation timestamp.
int64_t dts: decoding timestamp.
int stream_index: identifies the video/audio stream this AVPacket belongs to.
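A minimal sketch of the dump-to-file pattern noted above (hypothetical names; assumes an opened AVFormatContext pFormatCtx and a known video stream index video_index; for H.264 in MP4 the raw dump needs an Annex-B conversion first, which is the "small difference" mentioned above):
FILE *fp = fopen("out.h264", "wb");
AVPacket pkt;
while (av_read_frame(pFormatCtx, &pkt) >= 0) {
    if (pkt.stream_index == video_index)
        fwrite(pkt.data, 1, pkt.size, fp); /* write the compressed payload as-is */
    av_free_packet(&pkt);
}
fclose(fp);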