|
// Copyright (c) 2003-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Symbian Foundation License v1.0" to Symbian Foundation members and "Symbian Foundation End User License Agreement v1.0" to non-members
// which accompanies this distribution, and is available
// at the URL "http://www.symbianfoundation.org/legal/licencesv10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#ifndef __DEVVIDEOCONSTANTS_H__
#define __DEVVIDEOCONSTANTS_H__

#include <e32base.h>
#include <mmf/devvideo/devvideoplugininterfaceuids.hrh>
#include <mm/conversioncoefficient.h>

/**
DevVideo Panic Category

@publishedAll
@released
*/
_LIT(KDevVideoPanicCategory, "DevVideo");

/**
DevVideo Panic Codes

@publishedAll
@released
*/
enum TDevVideoPanicCodes
	{
	/**
	A pre-condition on a method has been violated.
	*/
	EDevVideoPanicPreConditionViolation = 1,
	/**
	A post-condition on a method has been violated.
	*/
	EDevVideoPanicPostConditionViolation = 2,
	/**
	An invalid hardware device ID has been supplied.
	*/
	EDevVideoPanicInvalidHwDeviceId = 3
	};
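
/*
Example (illustrative only, not part of the original header): DevVideo
components report these panics through User::Panic() with the category and
code defined above. 'CheckHwDeviceId' is a hypothetical helper name used
purely for illustration.

@code
static void CheckHwDeviceId(TBool aIdIsValid)
	{
	if (!aIdIsValid)
		{
		User::Panic(KDevVideoPanicCategory, EDevVideoPanicInvalidHwDeviceId);
		}
	}
@endcode
*/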
|

// DevVideo Plugin Interface UIDs

/** Video Decoder HW Device Plugin Interface UID
@publishedAll
@released
*/
const TUid KUidDevVideoDecoderHwDevice = {KUidDevVideoDecoderHwDeviceDefine};

/** Video Post Processor HW Device Plugin Interface UID
@publishedAll
@released
*/
const TUid KUidDevVideoPostProcessorHwDevice = {KUidDevVideoPostProcessorHwDeviceDefine};

/** Video Encoder HW Device Plugin Interface UID
@publishedAll
@released
*/
const TUid KUidDevVideoEncoderHwDevice = {KUidDevVideoEncoderHwDeviceDefine};

/** Video Pre Processor HW Device Plugin Interface UID
@publishedAll
@released
*/
const TUid KUidDevVideoPreProcessorHwDevice = {KUidDevVideoPreProcessorHwDeviceDefine};

// DevVideo Custom Interface Uids

/** MMMFVideoPlayHwDeviceExtensionScanCopy Custom Interface UID
@publishedAll
@released
*/
const TUid KUidDevVideoPlayHwDeviceExtensionScanCopy = {KUidDevVideoPlayHwDeviceExScanCopyDefine};


/**
Picture frame rate constants

Using these constants is recommended when the picture rate is known to match
one of them, to ensure that floating point equality comparisons work as expected.

Note that the MSL video APIs currently only deal with non-interlaced frames. For interlaced
video, all references to the term "picture" should be considered to refer to complete frames.
As such, the term "picture rate" here refers to the frame rate for interlaced video.

@publishedAll
@released
*/
const TReal KPictureRate5 = 5.0;
const TReal KPictureRate75 = 7.5;
const TReal KPictureRate10 = 10.0;
const TReal KPictureRate15 = 15.0;
const TReal KPictureRateNTSC24 = 23.97602397602398; // == 24000/1001
const TReal KPictureRate25 = 25.0;
const TReal KPictureRateNTSC30 = 29.97002997002997; // == 30000/1001
const TReal KPictureRate30 = 30.0;
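
/*
Example (illustrative only, not part of the original header): checking a
reported picture rate against the NTSC constant. Using the shared constant
means both sides of the equality comparison hold exactly the same TReal
value, which is what the recommendation above relies on. 'IsNtsc30Rate' is a
hypothetical helper name.

@code
static TBool IsNtsc30Rate(TReal aPictureRate)
	{
	return aPictureRate == KPictureRateNTSC30;
	}
@endcode
*/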
|

/**
Specifies the data format used for an uncompressed picture.
The values are bit patterns that can be combined with other format definition constants.

@publishedAll
@released
*/
enum TImageDataFormat
	{
	/** Raw RGB picture data in a memory area.
	*/
	ERgbRawData = 0x01000000,
	/** RGB picture data stored in a Symbian OS CFbsBitmap object.
	*/
	ERgbFbsBitmap = 0x02000000,
	/** Raw YUV picture data stored in a memory area. The data storage
	format depends on the YUV sampling pattern and data layout used.
	*/
	EYuvRawData = 0x04000000
	};
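
/*
Example (illustrative only, not part of the original header): because the
TImageDataFormat values are bit patterns, the format class of a combined
format value can be recovered with a bitwise AND. 'IsYuvFormat' is a
hypothetical helper name.

@code
static TBool IsYuvFormat(TUint32 aFormat)
	{
	return (aFormat & EYuvRawData) != 0;
	}
@endcode
*/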
|

/**
RGB uncompressed image format alternatives.
@publishedAll
@released
*/
enum TRgbFormat
	{
	/**
	16-bit RGB data format with four bits per component.
	The data format is the same as used in Symbian EColor4K bitmaps,
	with each pixel using two bytes with the bit layout [ggggbbbb xxxxrrrr]
	where "x" indicates unused bits. (This corresponds to "XRGB" 16-bit little-endian halfwords.)
	*/
	ERgb16bit444 = ERgbRawData | 0x00000001,

	/**
	16-bit RGB data format with five bits per component for red and blue and
	six bits for green. The data format is the same as used in Symbian EColor64K bitmaps,
	with each pixel using two bytes with the bit layout [gggbbbbb rrrrrggg].
	(This corresponds to "RGB" 16-bit little-endian halfwords.)
	*/
	ERgb16bit565 = ERgbRawData | 0x00000002,

	/**
	32-bit RGB data format with eight bits per component.
	This data format is the same as is used in Symbian EColor16MU bitmaps. The bit layout is
	[bbbbbbbb gggggggg rrrrrrrr xxxxxxxx] where "x" indicates unused bits.
	(This corresponds to "XRGB" 32-bit little-endian words.)
	*/
	ERgb32bit888 = ERgbRawData | 0x00000004,

	/**
	CFbsBitmap object with EColor4K data format.
	*/
	EFbsBitmapColor4K = ERgbFbsBitmap | 0x00000001,

	/**
	CFbsBitmap object with EColor64K data format.
	*/
	EFbsBitmapColor64K = ERgbFbsBitmap | 0x00000002,

	/**
	CFbsBitmap object with EColor16M data format.
	*/
	EFbsBitmapColor16M = ERgbFbsBitmap | 0x00000004,

	/**
	CFbsBitmap object with EColor16MU data format.
	*/
	EFbsBitmapColor16MU = ERgbFbsBitmap | 0x00000008
	};
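
/*
Example (illustrative only, not part of the original header): packing one
ERgb16bit565 pixel from 8-bit components. Interpreting the two-byte layout
[gggbbbbb rrrrrggg] as a little-endian halfword places red in bits 15..11,
green in bits 10..5 and blue in bits 4..0. 'PackRgb565' is a hypothetical
helper name.

@code
static TUint16 PackRgb565(TUint aRed, TUint aGreen, TUint aBlue)
	{
	return static_cast<TUint16>(((aRed >> 3) << 11) | ((aGreen >> 2) << 5) | (aBlue >> 3));
	}
@endcode
*/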
|

/**
YUV (YCbCr) uncompressed image data sampling pattern.
@publishedAll
@released
*/
enum TYuvSamplingPattern
	{
	/**
	4:2:0 sampling format. 4 luminance sample positions correspond to one chrominance sample position.
	The four luminance sample positions are on the corners of a square. The chrominance sample position
	is vertically half-way between the luminance sample positions and horizontally aligned with the left
	side of the square. This is the MPEG-2 and the MPEG-4 Part 2 sampling pattern.
	*/
	EYuv420Chroma1 = 0x00000001,

	/**
	4:2:0 sampling format. 4 luminance sample positions correspond to one chrominance sample position.
	The four luminance sample positions are on the corners of a square. The chrominance sample position
	is vertically and horizontally in the middle of the luminance sample positions. This is the MPEG-1 sampling pattern.
	*/
	EYuv420Chroma2 = 0x00000002,

	/**
	4:2:0 sampling format. 4 luminance sample positions correspond to one chrominance sample position.
	The four luminance sample positions are on the corners of a square. The chrominance sample position
	is co-located with the top-left corner of the square. This sampling format is one of the options in Annex E of H.264 | MPEG-4 AVC.
	*/
	EYuv420Chroma3 = 0x00000004,

	/**
	4:2:2 sampling format. 2 luminance sample positions correspond to one chrominance sample position.
	The luminance sample positions reside on the same pixel row. The chrominance sample position is co-located
	with the left one of the luminance sample positions. This is the MPEG-2 4:2:2 sampling pattern.
	*/
	EYuv422Chroma1 = 0x00000008,

	/**
	4:2:2 sampling format. 2 luminance sample positions correspond to one chrominance sample position.
	The luminance sample positions reside on the same pixel row. The chrominance sample position is in the
	middle of the luminance sample positions. This is the MPEG-1 4:2:2 sampling pattern.
	*/
	EYuv422Chroma2 = 0x00000010
	};
|

/**
Defines the YUV data layout in a decoded picture.
@publishedAll
@released
*/
enum TYuvDataLayout
	{
	/**
	The data is stored in planar mode. The memory buffer contains first all Y component
	data for the whole picture, followed by U and V, making the data format Y00Y01Y02Y03...U0...V0...
	For YUV 4:2:0 data, this is the same data format as EFormatYUV420Planar in the Onboard Camera API.
	*/
	EYuvDataPlanar = 0x00000001,

	/**
	The data is stored in interleaved mode, with all components interleaved in a single memory block.
	Interleaved layout is only supported for YUV 4:2:2 data. The data byte order is Y1VY0U,
	corresponding to "UY0VY1" little-endian 32-bit words.
	This is the same data format as EFormatYUV422Reversed in the Onboard Camera API.
	*/
	EYuvDataInterleavedLE = 0x00000002,

	/**
	The data is stored in interleaved mode, with all components interleaved in a single memory block.
	Interleaved layout is only supported for YUV 4:2:2 data. The data byte order is UY0VY1,
	corresponding to "UY0VY1" big-endian 32-bit words.
	This is the same data format as EFormatYUV422 in the Onboard Camera API.
	*/
	EYuvDataInterleavedBE = 0x00000004,
	/**
	The data is stored in semi-planar mode. The memory buffer contains first all Y component
	data for the whole picture, followed by the U and V components, which are interleaved, making the data
	format Y00Y01Y02Y03...U0V0U1V1... For YUV 4:2:0 data, this is the same data format as
	EFormatYUV420SemiPlanar in the Onboard Camera API.
	*/
	EYuvDataSemiPlanar = 0x00000008
	};
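
/*
Example (illustrative only, not part of the original header): locating the
component planes of an EYuvDataPlanar YUV 4:2:0 picture, assuming the plane
widths are not padded. The Y data is followed by the U plane and then the V
plane, each chrominance plane holding one sample per four luminance samples.
'PlanarYuv420Offsets' is a hypothetical helper name.

@code
static void PlanarYuv420Offsets(TInt aWidth, TInt aHeight,
								TInt& aUOffset, TInt& aVOffset, TInt& aBufferSize)
	{
	const TInt ySize = aWidth * aHeight;
	const TInt chromaSize = (aWidth / 2) * (aHeight / 2);
	aUOffset = ySize;              // U plane starts right after Y
	aVOffset = ySize + chromaSize; // V plane follows U
	aBufferSize = ySize + 2 * chromaSize;
	}
@endcode
*/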
|
/**
Defines the picture effect used for an input picture. Please refer to ITU-T H.264 | ISO/IEC MPEG-4 AVC [] for the definitions of the transition effects.
@publishedAll
@released
*/
enum TPictureEffect
	{
	/**
	No effect.
	*/
	EEffectNone = 0x00000001,

	/**
	Fade from black.
	*/
	EEffectFadeFromBlack = 0x00000002,

	/**
	Fade to black.
	*/
	EEffectFadeToBlack = 0x00000004,

	/**
	Unspecified transition from or to constant colour.
	*/
	EEffectUnspecifiedThroughConstantColor = 0x00000008,

	/**
	Dissolve.
	*/
	EEffectDissolve = 0x00000010,

	/**
	Wipe.
	*/
	EEffectWipe = 0x00000020,

	/**
	Unspecified mixture of two scenes.
	*/
	EEffectUnspecifiedMixOfTwoScenes = 0x00000040
	};
|
/**
Defines the data value range used for RGB data. Used for determining the correct color space conversion factors.
@publishedAll
@released
*/
enum TRgbRange
	{
	/**
	The RGB data uses the full 8-bit range of [0…255].
	*/
	ERgbRangeFull = 0x00000001,

	/**
	The RGB data uses the nominal range of [16…235]. Individual samples can still contain
	values beyond that range; the rest of the 8-bit range is used for headroom and footroom.
	*/
	ERgbRange16to235 = 0x00000002
	};
|


/**
Defines possible data unit types for encoded video data. The data unit types are used both
for encoded video input for playback and for encoded video output from recording.
@publishedAll
@released
*/
enum TVideoDataUnitType
	{
	/**
	Each data unit is a single coded picture.
	*/
	EDuCodedPicture = 0x00000001,

	/**
	Each data unit is a coded video segment.
	A coded video segment is a part of the coded video data that forms an independently
	decodable part of a coded video frame. For example, a video packet in MPEG-4 Part 2
	and a slice in H.263 are coded video segments.
	*/
	EDuVideoSegment = 0x00000002,

	/**
	Each data unit contains an integer number of video segments consecutive in decoding order,
	possibly more than one. The video segments shall be a subset of one coded picture.
	*/
	EDuSeveralSegments = 0x00000004,

	/**
	Each data unit contains a piece of raw video bitstream, not necessarily aligned at any headers.
	The data must be written in decoding order. This data unit type can be used for playback if the client
	does not have information about the bitstream syntax, and just writes data in random-sized chunks. For
	recording, this data unit type is useful if the client can handle arbitrarily split data units, giving the
	encoder maximum flexibility in buffer allocation. For encoded data output, each data unit must still
	belong to exactly one output picture.
	*/
	EDuArbitraryStreamSection = 0x00000008
	};
|
/**
Defines possible encapsulation types for coded video data units. The encapsulation information is
used both for encoded video input for playback and for encoded video output from recording.
@publishedAll
@released
*/
enum TVideoDataUnitEncapsulation
	{
	/**
	The coded data units can be chained in a bitstream that can be decoded. For example, MPEG-4
	Part 2 elementary streams, H.263 bitstreams, and H.264 | MPEG-4 AVC Annex B bitstreams fall into this category.
	*/
	EDuElementaryStream = 0x00010000,

	/**
	The coded data units are encapsulated in a general-purpose packet payload format whose coded
	data units can be decoded independently but cannot be generally chained into a bitstream.
	For example, the Network Abstraction Layer Units of H.264 | MPEG-4 AVC fall into this category.
	*/
	EDuGenericPayload = 0x00020000,

	/**
	The coded data units are encapsulated in an RTP packet payload format. The RTP payload header
	may contain codec-specific items, such as a redundant copy of a picture header in the H.263
	payload specification RFC2429.
	*/
	EDuRtpPayload = 0x00040000
	};
|
/**
Defines the HRD/VBV specification used in a stream.
@publishedAll
@released
*/
enum THrdVbvSpecification
	{
	/** No HRD/VBV specification. */
	EHrdVbvNone = 0x00000001,

	/** The HRD/VBV specification in the corresponding coding standard. */
	EHrdVbvCodingStandard = 0x00000002,

	/** Annex G of 3GPP TS 26.234 Release 5. */
	EHrdVbv3GPP = 0x00000004
	};
|
/**
Defines the pre-processor and post-processor types available in the system.
One pre-processor or post-processor can implement multiple operations simultaneously, and thus the
types are defined as bit values that can be combined as a bitfield.
@publishedAll
@released
*/
enum TPrePostProcessType
	{
	/**
	Input cropping, used for pan-scan cropping in video playback and digital zoom in video recording.
	Pan-scan cropping is useful, for example, for displaying arbitrary-sized pictures with codecs that
	only support image dimensions that are multiples of 16 pixels.
	*/
	EPpInputCrop = 0x00000001,

	/**
	Horizontal mirroring: flips the image data around a vertical line in its center.
	*/
	EPpMirror = 0x00000002,

	/**
	Picture rotation: supports rotation by 90 or 180 degrees, clockwise and anticlockwise.
	*/
	EPpRotate = 0x00000004,

	/**
	Picture scaling to a new size, including both upscaling and downscaling.
	The supported scaling types and scale factors depend on the pixel processor.
	*/
	EPpScale = 0x00000008,

	/**
	Crops the picture to a final output rectangle.
	*/
	EPpOutputCrop = 0x00000010,

	/**
	Pads the output picture to a defined size. Used in video recording to pad pictures to
	suit the encoder input requirements.
	*/
	EPpOutputPad = 0x00000020,

	/**
	YUV to RGB color space conversion. Supported only for video playback.
	*/
	EPpYuvToRgb = 0x00000040,

	/**
	RGB to YUV color space conversion. Supported only for video recording.
	*/
	EPpRgbToYuv = 0x00000080,

	/**
	YUV to YUV data format conversion. Supported only for video recording.
	*/
	EPpYuvToYuv = 0x00000100,

	/**
	Noise filtering. Noise filtering is typically used to enhance the input
	picture from the camera, and is usually only supported for video recording.
	*/
	EPpNoiseFilter = 0x00000200,

	/**
	Color enhancement. Color enhancement is typically used to enhance the input picture
	from the camera, and is usually only supported for video recording.
	*/
	EPpColorEnhancement = 0x00000400,

	/**
	Frame stabilisation. Supported only for video recording.
	*/
	EPpFrameStabilisation = 0x00000800,

	/**
	Deblocking is typically used to remove artefacts from the output picture that result from
	high compression or a noisy input signal. Only supported for video playback.
	*/
	EPpDeblocking = 0x00001000,

	/**
	Deringing is typically used to remove artefacts from the output picture that result from
	a noisy input signal corrupting motion estimates. Only supported for video playback.
	*/
	EPpDeringing = 0x00002000,

	/**
	Custom hardware device specific processing.
	*/
	EPpCustom = 0x10000000
	};
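
/*
Example (illustrative only, not part of the original header): since the
TPrePostProcessType values are bit flags, several operations can be combined
into one TUint32 bitfield and tested individually. 'KExampleCombination' and
'UsesScaling' are hypothetical names.

@code
const TUint32 KExampleCombination = EPpInputCrop | EPpScale | EPpYuvToRgb;

static TBool UsesScaling(TUint32 aCombination)
	{
	return (aCombination & EPpScale) != 0;
	}
@endcode
*/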
|
/**
Dithering types.
@publishedAll
@released
*/
enum TDitherType
	{
	/** No dithering. */
	EDitherNone = 0x00000001,

	/** Ordered dither. */
	EDitherOrdered = 0x00000002,

	/** Error diffusion dither. */
	EDitherErrorDiffusion = 0x00000004,

	/** Other hardware device specific dithering type. */
	EDitherOther = 0x00000008
	};

/**
Rotation types for pre-processors and post-processors.
@publishedAll
@released
*/
enum TRotationType
	{
	/** No rotation. */
	ERotateNone = 0x00000001,

	/** Rotate the picture 90 degrees clockwise. */
	ERotate90Clockwise = 0x00000002,

	/** Rotate the picture 90 degrees anticlockwise. */
	ERotate90Anticlockwise = 0x00000004,

	/** Rotate the picture 180 degrees. */
	ERotate180 = 0x00000008
	};
|


/**
Defines possible encoding bit-rate control modes.
@publishedAll
@released
*/
enum TBitrateControlType
	{
	/**
	The encoder does not control the bit-rate, but uses the specified target picture quality and picture
	rate as they are. The coded data stream must still remain compliant with the standard and buffer settings
	in use, if any, and thus HRD/VBV settings can limit the possible bit-rate.
	*/
	EBrControlNone = 0x00000001,

	/**
	The encoder controls the coded bit-rate of the stream. The caller indicates the target bit-rate, target
	picture quality, target frame rate, spatial-temporal trade-off, and latency-quality trade-off.
	*/
	EBrControlStream = 0x00000002,

	/**
	The encoder controls the coded bit-rate of each picture. The caller gives the target number of bits per
	frame. Each given input frame is coded. This type of operation is applicable only with memory-buffer-based
	input.
	*/
	EBrControlPicture = 0x00000004
	};
|

/**
Defines the scalability type for a single bit-rate scalability layer.
@publishedAll
@released
*/
enum TScalabilityType
	{
	/**
	The layer uses temporal scalability. Using the layer increases the picture rate.
	*/
	EScalabilityTemporal = 0x00000001,

	/**
	The layer uses quality scalability. Using the layer improves picture quality.
	*/
	EScalabilityQuality = 0x00000002,

	/**
	The layer uses spatial scalability. Using the layer increases picture resolution.
	*/
	EScalabilitySpatial = 0x00000004,

	/**
	The layer is a fine-granularity scalability layer. In fine granularity scalability, the output
	quality increases gradually as a function of decoded bits from the enhancement layer.
	*/
	EScalabilityFineGranularity = 0x10000000,

	/**
	The layer is a fine-granularity quality scalability layer.
	*/
	EScalabilityQualityFG = EScalabilityFineGranularity | EScalabilityQuality
	};
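
/*
Example (illustrative only, not part of the original header): the
fine-granularity bit can be combined with the other scalability types, as
EScalabilityQualityFG shows, so a layer can be tested for fine granularity
with a bitwise AND. 'IsFineGranularity' is a hypothetical helper name.

@code
static TBool IsFineGranularity(TUint32 aScalabilityType)
	{
	return (aScalabilityType & EScalabilityFineGranularity) != 0;
	}
@endcode
*/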
|
/**
Forward error control strength used for an unequal error protection level. Other values between
EFecStrengthNone and EFecStrengthHigh can also be used; the encoder will round the values to the levels
it supports.
@publishedAll
@released
*/
enum TErrorControlStrength
	{
	/** No error control. */
	EFecStrengthNone = 0,

	/** Low error control strength. */
	EFecStrengthLow = 256,

	/** Normal error control strength. */
	EFecStrengthNormal = 512,

	/** High error control strength. */
	EFecStrengthHigh = 768
	};
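
/*
Example (illustrative only, not part of the original header): an intermediate
error control strength between the predefined levels; the encoder rounds such
values to the levels it supports. 'KExampleFecStrength' is a hypothetical name.

@code
const TUint KExampleFecStrength = (EFecStrengthLow + EFecStrengthNormal) / 2; // 384
@endcode
*/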
|
/**
Defines the scalability type for in-layer bit-rate scalability.
@publishedAll
@released
*/
enum TInLayerScalabilityType
	{
	/** Temporal scalability, such as B-pictures. */
	EInLScalabilityTemporal = 1,

	/** Other scalability type. */
	EInLScalabilityOther
	};

/**
Defines what part of a frame is contained within a video buffer.
@publishedAll
@released
*/
enum TFramePortion
	{
	/** The frame portion is unknown. */
	EFramePortionUnknown,

	/** An entire frame. */
	EFramePortionWhole,

	/** A fragment of a frame containing the start but not the end. */
	EFramePortionStartFragment,

	/** A fragment of a frame containing neither the start nor the end. */
	EFramePortionMidFragment,

	/** A fragment of a frame containing the end but not the start. */
	EFramePortionEndFragment
	};

#endif