Contents of /gnuradio/src/pspectra/lib/vrp/needs-work/VrTV.h
Revision 1.3
Sat Apr 19 20:40:05 2003 UTC by eb
Branch: MAIN
CVS Tags: BR_USRP_post_merge_1, SECOND_MIGRATION_2006_08_04, REL_0_9, REL_0_8, FIRST_MIGRATION_2006_07_26, merged_BR_USRP_to_head_1, merged_head_to_BR_USRP_1, head_post_BR_USRP_merge_1, HEAD
Branch point for: BR_USRP
Changes since 1.2: +1 -1 lines
File MIME type: text/plain
see ChangeLog

/* -*- Mode: c++ -*-
 *
 * Copyright 1997 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that
 * copyright notice and this permission notice appear in supporting
 * documentation, and that the name of M.I.T. not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  M.I.T. makes no representations about the
 * suitability of this software for any purpose.  It is provided "as is"
 * without express or implied warranty.
 *
 */

#ifndef _VRTV_H_
#define _VRTV_H_

#include <sys/types.h>
#include <fcntl.h>   /* for file i/o */
#include <VrSigProc.h>
#include <math.h>
#include <qapplication.h>
#include <qlayout.h>
#include <qimage.h>
#include <qwidget.h>
#include <qpainter.h>
#define NeedFunctionPrototypes 1
#include <X11/Xlib.h>
#include <X11/Intrinsic.h>
#include <X11/extensions/XShm.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <VrDecimatingSigProc.h>

#if defined (ENABLE_MMX)
#include <VrMMX.h>
#endif

//#define AUDIO_CENTER (10680000.0 - 4500000.0)
#define AUDIO_CENTER (10673000.0 - 4500000.0)
#define MAX_TAPS 1000
#define HORIZ_OFFSET 150
#define SYNC_LOW 225        /* Number of samples with 'low' value */
#define SYNC_HIGH 37        /* Number of samples with 'high' value */
#define SYNC_LENGTH 512     //(SYNC_LOW+SYNC_HIGH+SYNC_LOW)

#define SYNC_SCALE 10
#define SYNC_ORDER (numTaps * SYNC_SCALE)
#define FREE_RUN 300        /* Number of fields between search for vertical sync */
#define TOTAL_SIZE ((int)((33000000 / 59.9411) / 4)) /* number of samples in 1 field */
#define PER_LINE ((33000000 / 60 / 4) / 262)         /* number of samples in 1 scan line */
#define MY_LINE_SIZE 500    /* Max number of pixels in 1 scan line */
#define MY_HEIGHT 400       /* Max number of lines in 1 video field */
#define DEBUG_COUNT 1638400 /* interval for displaying debug information */
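
/* Rough derivation of the two timing constants above (an inference from the
 * constants themselves, not documented in the original source): assuming a
 * 33 MHz input rate decimated by 4, one field at the NTSC rate of ~59.94 Hz
 * spans about 33e6 / 59.9411 / 4 ~= 137,635 samples (TOTAL_SIZE), and each of
 * the 262 scan lines per field spans 33000000 / 60 / 4 / 262 = 524 samples
 * after integer division (PER_LINE).
 */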

#define MAX_COLOR 256       /* Max pixel value in color map for image display */
#define SKIP_FRAMES 5 //3   /* Number of frames to skip after a frame is displayed */
#define BIG 999999999       /* used as initial value in max/min calculations */
#define TAPTYPE int         /* Data type of filter taps */
#define SHIFT_INC 10

QWidget *master_widget;
QBoxLayout *master_layout;
QApplication *master_application;
int qtemp_started = 0;
int qtemp_argc = 1;
char *qtemp_argv[] = {"main"};

int debug_limit = 1;
int noShm = 1;
XShmSegmentInfo shmInfo = {0, -1, NULL, 0};
WId my_local_hd;
Display *my_local_dpy;
XImage *my_local_ximage;
uchar *local_newbits;
uint pix[256]; // pixel translation table
int dc_base, video_shift_amount, debug_counter;
int frame_count, frame_total, counts_between, weight_count, started_skip;
int vsync_value_low, vsync_value_high, vsync_sample;
int hsync_left, sync_next, hsync_adjust, vsize_extra;
int vsync_left, vsync_frame, vsync_search_count;
int decimate_count, window_count, vsync_start, sync_input, vsync_offset, vsync_offset_new=0;
int sync_window[SYNC_LENGTH];
int sync_high_start = SYNC_LOW, sync_high_end = SYNC_LOW + SYNC_HIGH-1;
int sync_average, sync_low_average, sync_high_average, sync_average_max;
int video_too_high, video_too_low;
int free_run_field_count = 1;
int TVinSampFreq;
VrComplex audio_phase_correction, audio_phase_corr_incr;
float audio_center_freq, audio_gain;
mmxTaps* audio_processedTaps; // Precomputed constants, shifted four times
VrComplex* audio_tap_values;
int audio_decimate, audio_taps, process_audio, video_skip, current_decimation;
void initimage(void);
void shm_put();

template<class iType>
class VrTV : public VrSigProc<iType,complex> {
protected:
  int numTaps;
  int tapincrement;
#if defined (ENABLE_MMX)
  mmxTaps* mmxtaps, *mmxsynctaps, *mmxvideo;
#endif
  TAPTYPE* taps, *synctaps, *tapstart, *tapend;
  float cutoff, center_freq, gain, arg;
  void calculate_taps(TAPTYPE *arg_taps, mmxTaps **arg_mmxtaps, int arg_order,
                      float arg_center_freq, int arg_normalize, float arg_gain);
  int decimation;
  int hdata, packetc;
  int current_line, current_decim, decim_max;
  int current_column;
  unsigned char *current_pointer;
public:
  virtual void work(int n);
  virtual void initialize();
  void setContr(double contr){ dc_base = (int)(contr*(-10000.0));}
  void setBright(double bright){ video_shift_amount = (int)bright;}
  void setVsync(double vsync){ vsync_offset = (int)(vsync*TOTAL_SIZE/400.0);}
  VrTV(int c,int t,int d ,int f, float g, int arg_audio_decimate, int arg_audio_taps);
  ~VrTV();
};
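
/* A hypothetical construction sketch (the argument values below are purely
 * illustrative and not taken from any actual flow graph):
 *
 *   VrTV<short> *tv = new VrTV<short>(0,        // c: cutoff (Hz)
 *                                     64,       // t: number of taps
 *                                     4,        // d: decimation factor
 *                                     10700000, // f: center frequency (Hz)
 *                                     1.0,      // g: gain
 *                                     256,      // audio decimation
 *                                     64);      // audio taps
 */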

#define AUDIO_PROCESSING() { \
  if (process_audio <= 0) { \
    /* process_audio counts down the number of input samples that are \
     * skipped before the next audio output point is to be calculated. \
     * Since this number has gone to 0 when we reach the interior of the \
     * 'if' statement, we calculate the next audio sample using the \
     * audio FIR filter. \
     */ \
    inputp = inputreadptr; \
    audio_result = 0; \
    /*#if defined (ENABLE_MMX)*/ \
    if(audio_processedTaps->mmxReady()) \
      audio_result = audio_processedTaps->mmxCVDProduct(inputp); \
    else \
    /*#endif*/ \
    { \
      VrComplex *taps_tmp = audio_tap_values; \
      for (int j=0; j < audio_taps; j++) \
        audio_result += taps_tmp[j] * inputp[j]; \
    } \
    audio_phase_correction *= audio_phase_corr_incr; \
    audio_result *= audio_phase_correction; \
  } /* end of audio processing */ }
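
/* What the macro above computes, written out (my reading of the code, not a
 * statement from the original author): each audio output sample is a complex
 * FIR dot product over audio_taps input samples, where the taps already
 * contain the mixer term e^(-j*arg*k) with arg = 2*pi*AUDIO_CENTER /
 * TVinSampFreq. Because an output is produced only every audio_decimate
 * input samples, the running factor audio_phase_correction (multiplied each
 * time by audio_phase_corr_incr = e^(-j*arg*audio_decimate)) keeps the mixer
 * phase continuous from one output sample to the next.
 */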

#define CALC_TAP() \
  this_input = *inputp++; \
  result_re += *tapp++ * this_input; \
  result_im += *tapp++ * this_input;

/* Perform the video processing. */
#define VIDEO_AM_DEMOD() { \
  current_decimation = decimation; \
  inputp = inputreadptr; \
  /*#if defined (XXENABLE_MMX) */\
  /* if(mmxvideo->mmxReady()) { */\
  /*   video_result = mmxvideo->mmxCVDProduct(inputp); */\
  /*   current = (int) (real(video_result) * real(video_result) + imag(video_result) * imag(video_result)); */\
  /*   current = (65000 - current) >> 2; */\
  /* } */\
  /* else*/ \
  /*#endif */ \
  { \
    result_re = 0; \
    result_im = 0; \
    tapp = tapstart; \
    lasttap = tapend; \
    /* Perform the calculation of the FIR filter for video. \
     * This filter performs both frequency translation from 10.7 MHz to DC \
     * and filtering of the input signal. \
     */ \
    while (tapp != lasttap) { \
      /* expand inline for better optimization - tap count must be a multiple of 8 */ \
      CALC_TAP(); CALC_TAP(); CALC_TAP(); CALC_TAP(); \
      CALC_TAP(); CALC_TAP(); CALC_TAP(); CALC_TAP(); \
    } \
    result_re >>= (SHIFT_INC/2); \
    result_im >>= (SHIFT_INC/2); \
    /* Get the square of the absolute value of the video signal to convert it \
     * back from the complex domain into a 'real' number. \
     */ \
    current = ((int) (result_re * result_re + result_im * result_im)) >> (1 * SHIFT_INC); \
    current = (65000 - current) >> 2; \
  } }
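
/* Note on the demodulation above (my interpretation): result_re/result_im
 * are the video carrier mixed down to DC and low-pass filtered, so
 * result_re^2 + result_im^2 is the squared envelope of the AM video signal.
 * The (65000 - current) step inverts the scale, consistent with NTSC
 * negative modulation, where a stronger carrier corresponds to a darker
 * picture (sync tips at maximum carrier).
 */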

/* Output debug data and statistics to the screen every few seconds.
 */
#define DEBUG_OUTPUT() { \
  if (++decimate_count > DEBUG_COUNT) { \
    decimate_count = 0; \
    if (packetc > 0) \
      hdata /= packetc; \
    decim_max = hdata/300; \
    if (debug_counter++ > debug_limit) { /* display random debug stuff every so often */ \
      debug_limit = 2; /* move the interval out */ \
      debug_counter = 0; \
      if (frame_count == 0) frame_count = 1; \
      printf ("frame %d between %d decim %d\n", \
              frame_count, frame_total / frame_count, decim_max); \
      printf ("vsync low %d high %d\n", vsync_value_low, vsync_value_high); \
      printf("dc base %d shift %d packet %d avg %d\n", dc_base, video_shift_amount, packetc, hdata); \
      printf ("too high %d low %d\n", video_too_high, video_too_low); \
      frame_count = 0; hdata = 0; packetc = 0; frame_total = 0; \
      video_too_high = 0; video_too_low = 0; \
    } \
  } /* DEBUG_COUNT */ }

#define START_NEW_VSYNC() { \
  vsync_search_count =0; \
  /* dc_base is used to normalize the black level of the */ \
  /* signal to 0 (DC restoration). */ \
  dc_base = vsync_value_low; \
  /* video_shift_amount is used to scale the brightest part */ \
  /* of the picture to be MAX_COLOR */ \
  tempvid = 7 * (vsync_value_high - vsync_value_low); \
  if (tempvid > 0) { \
    video_shift_amount = -10; \
    while (tempvid) { \
      video_shift_amount++; \
      tempvid >>= 1; \
    } \
  } \
  tapincrement = numTaps; \
  mmxvideo = mmxtaps; \
  tapstart = taps; \
  tapend = &taps[numTaps*2]; \
  sync_next = 0; \
  while (vsync_offset > TOTAL_SIZE) \
    vsync_offset -= TOTAL_SIZE; \
  /*vsync_left = TOTAL_SIZE - vsync_offset; */ \
  vsync_left = TOTAL_SIZE - vsync_offset_new; \
  vsync_offset = 0; \
  if (vsize_extra++ > 1) { \
    vsize_extra = 0; \
    vsync_left -= 1; \
  } \
  hsync_left = PER_LINE; \
  hsync_adjust = 0; \
  if (current_line > 120 && current_line < 300 /*170*/) { \
    frame_total += counts_between; \
    frame_count++; \
    shm_put(); \
    started_skip = 0; \
  } \
  counts_between = 0; \
  current_line = 0; \
  if ((free_run_field_count > 1) && vsync_frame == 0) current_line = 600; \
  if (!started_skip) { \
    started_skip = 1; \
    video_skip = SKIP_FRAMES * TOTAL_SIZE * decimation; \
    begin_skip = 1; \
  } }
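
/* Note on the brightness scaling above (my reading of the code): the while
 * loop leaves video_shift_amount equal to bit_length(7 * (vsync_value_high -
 * vsync_value_low)) - 10, so that (current - dc_base) >> video_shift_amount
 * falls roughly into the 0..MAX_COLOR pixel range, with out-of-range values
 * clipped later and counted in video_too_high / video_too_low.
 */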

#define CHECK_END_OF_LINE() { \
  /* Check for the number of pixels in one horizontal scan line and wrap */\
  /* video image if necessary. */\
  if (--hsync_left <= 0) { /* we got a horizontal sync */ \
    /* when we get a horizontal sync, we reset the column counter and */\
    /* set 'current_pointer' to point to the frame buffer for the */\
    /* next video line. */\
    packetc++; \
    if (current_line >= MY_HEIGHT) \
      current_pointer = NULL; \
    else \
      current_pointer = &local_newbits[current_line++ * MY_LINE_SIZE * 2]; \
    current_column = 0; \
    current_decim = 0; \
    hsync_left = PER_LINE; \
    increment_amount += 1; \
    inputvalid -= 1; \
    inputreadptr += 1; \
    if (hsync_adjust++ > 1) { \
      hsync_adjust = 0; \
      increment_amount += 1; \
      inputvalid -= 1; \
      inputreadptr += 1; \
      vsync_left--; \
    } \
  } }

#define CHECK_END_OF_FIELD() { \
  /* If we are nearing the end of the video field, increase the number of */ \
  /* taps in the filter to obtain a higher quality image for the vertical */ \
  /* sync recognition process. */ \
  if (vsync_frame > free_run_field_count && vsync_left < SYNC_LENGTH) { \
    tapincrement = SYNC_ORDER; \
    /*#if defined (ENABLE_MMX) */\
    mmxvideo = mmxsynctaps; \
    /*#endif*/ \
    tapstart = synctaps; \
    tapend = &synctaps[SYNC_ORDER*2]; \
  } \
  /* We have hit the end of the current video field (as calculated by */ \
  /* number of input samples). If we are not in free run, then start */ \
  /* calculating correlation values for vertical sync detection. */ \
  if (--vsync_left <= 0 && (!sync_next)) { \
    if (vsync_frame++ > free_run_field_count) { \
      /* start looking for a new Vertical Sync */ \
      sync_next = 1; \
      vsync_search_count =0; \
      vsync_frame = 0; \
      vsync_offset = HORIZ_OFFSET; \
      sync_average_max = -BIG; \
      free_run_field_count <<= 1; \
      if (free_run_field_count > FREE_RUN) \
        free_run_field_count = FREE_RUN; \
    } \
    else \
      vsync_start = 1; \
  } \
  /* Calculate correlation values for vertical sync detection. \
   */ \
  if (sync_next) { \
    if (vsync_search_count++ > TOTAL_SIZE + 1000) \
      vsync_start = 1; \
    sync_low_average += current - sync_window[sync_input]; \
    sync_window[sync_input] = current; \
    if (++sync_input > SYNC_LENGTH-1) sync_input = 0; \
    sync_high_average += sync_window[sync_high_start] - sync_window[sync_high_end]; \
    if (++sync_high_start > SYNC_LENGTH-1) sync_high_start = 0; \
    if (++sync_high_end > SYNC_LENGTH-1) sync_high_end = 0; \
    sync_average = (sync_high_average << 1) - sync_low_average / SYNC_LENGTH; \
    vsync_offset++; \
    if (sync_average > sync_average_max) { \
      sync_average_max = sync_average; \
      vsync_offset = HORIZ_OFFSET; \
      vsync_sample = 0; \
      vsync_value_low = BIG; \
      vsync_value_high = -BIG; \
    } \
  } \
  /* Remember highest and lowest input values in area near correlation \
   * peak. These values are then used to calculate DC offset and scale \
   * factors for the video image. \
   */ \
  if (vsync_sample < SYNC_HIGH + SYNC_LOW) { \
    vsync_sample++; \
    if (current < vsync_value_low) vsync_value_low = current; \
    if (current > vsync_value_high) vsync_value_high = current; \
  } }
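
/* Sketch of the vertical-sync search above (my reading of the code):
 * sync_window[] is a circular buffer holding the last SYNC_LENGTH demodulated
 * samples; sync_low_average is a running sum over the whole window, while
 * sync_high_average is a running sum over a SYNC_HIGH-sample slice of it.
 * sync_average = 2*sync_high_average - sync_low_average/SYNC_LENGTH therefore
 * rewards a short run of large samples that stands out against the window
 * average, and its maximum over the search interval is taken as the vertical
 * sync position.
 */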

/* Work function for performing actual processing of data.
 */
template<class iType> void
VrTV<iType>::work(int n)
{
  register char *inputp, *inputreadptr;
  int increment_amount, inputvalid;
  register int this_input;
  register TAPTYPE *tapp, *lasttap;
  register TAPTYPE result_re, result_im;
  int current, tempvid, video_value;
  int increment_output, outputvalid;
  complex *outputptr;
  VrComplex audio_result, video_result;
  int begin_skip;

  inputreadptr = inputReadPtr(-tapincrement + 1);
  increment_amount = 0;
  inputvalid = validUnits();
  outputptr = getWritePtr();
  outputvalid = bufferLeftTillWrap();
  increment_output = 0;
  begin_skip = 0;
  for (int work_index=0;work_index<n;) {
    /* Perform the audio processing.
     */
    AUDIO_PROCESSING();

    if (video_skip > 0)
      goto skip_video_processing;
    /* video_skip counts down the number of input samples that are
     * skipped before the next video output point is to be calculated.
     * Since this number has gone to 0, we calculate either the next
     * video sample (using the 'short' FIR filter) or calculate a
     * data point that is used in the convolution calculation when
     * looking for vertical sync (using the 'long' FIR filter).
     */

    VIDEO_AM_DEMOD();
    CHECK_END_OF_LINE();
    CHECK_END_OF_FIELD();

    /* Perform the X window processing for the start of a new video field
     */
    if (vsync_start) { /* we got a vertical sync */
      vsync_start = 0;
      START_NEW_VSYNC();
    }
    /* Perform the X window processing for successive pixels in a video image
     */
    else { /* here we add pixels to the output image */
      counts_between++;
      if (current_decim++ > decim_max && current_pointer) {
        hdata++;
        video_value = (current - dc_base) >> video_shift_amount; /* scale so the maximum value is MAX_COLOR */
        if (video_value > 255) {
          video_value = 255;
          video_too_high++;
        }
        else if (video_value < 0) {
          video_value = 0;
          video_too_low++;
        }
        if (current_column++ < MY_LINE_SIZE) {
          *current_pointer++ = video_value;
          *current_pointer++ = video_value; /* to double width of image */
        }
        current_decim = 0;
      }
    }
    DEBUG_OUTPUT();

  skip_video_processing:
    /* Output demodulated data to the buffer for passing to the audio
     * driver.
     */
    if (process_audio <= 0) {
      process_audio = audio_decimate;
      if (outputvalid-- > 0) {
        *outputptr++ = audio_result;
        increment_output++;
      }
      else {
        incWritePtr(increment_output);
        outputWrite(audio_result);
        outputptr = getWritePtr();
        outputvalid = bufferLeftTillWrap();
        increment_output = 0;
      }
      work_index++;
    }
    /* Increment input pointers to get to the next sample that must be
     * processed.
     */
    increment_amount += current_decimation;
    inputvalid -= current_decimation;
    if (inputvalid > 0)
      inputreadptr += current_decimation;
    else {
      incInput(increment_amount);
      inputreadptr = inputReadPtr(-tapincrement + 1);
      inputvalid = validUnits();
      increment_amount = 0;
    }
#if 1
    if (begin_skip) {
      begin_skip = 0;
      current_decimation = audio_decimate;
    }
    if (video_skip < current_decimation)
      current_decimation = decimation;
#endif
    process_audio -= current_decimation;
    video_skip -= current_decimation;
  }
  if (increment_amount > 0)
    incInput(increment_amount);
  if (increment_output > 0)
    incWritePtr(increment_output);
}

/* Calculate tap values for a Hamming window of the requested order.
 *
 * FIR filter definition:
 *   VrTV( cutoff freq., Num of taps, decimation factor, center frequency)
 *   cutoff (Hz) = 0.0 => LPF using a Hamming window; otherwise the LPF is transformed to have a higher cutoff frequency
 *   decimation factor => set to one unless some integer > 1 is specified
 *   center_freq (Hz) => used to specify a composite frequency-shifting filter (i.e. channel filter)
 */
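
/* In equation form (restating the code below, nothing new added): with
 * M = arg_order - 1 and the Hamming window w(k) = 0.54 - 0.46*cos(2*pi*k/M),
 * the composite (arg_center_freq != 0) taps are
 *
 *   h[k] = my_gain * w(k) * e^(-j * 2*pi * arg_center_freq * k / TVinSampFreq)
 *
 * and they are stored as interleaved real/imaginary integers scaled by
 * 2^SHIFT_INC.
 */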
template<class iType> void
VrTV<iType>::calculate_taps(TAPTYPE *arg_taps, mmxTaps **arg_mmxtaps, int arg_order,
                            float arg_center_freq, int arg_normalize, float arg_gain)
{
  int index;
  float M = arg_order-1; /* filter Order */
  float ftemp, my_gain;
  float taptemp_re, taptemp_im;
  VrComplex temptaps[MAX_TAPS];

  TVinSampFreq = getInputSamplingFrequencyN(0);
  if (arg_center_freq == 0.0){ // produces a low-pass filter using a real Hamming window
    for ( index=0 ; index < arg_order ; index++)
      temptaps[index] = VrComplex(arg_gain * (0.54-0.46*
                                  cos(2*M_PI*((float)index)/M)), 0.0);
  } else { // Build composite Complex Filter => adds freq-shifting part
    if (arg_normalize) {
      ftemp = 0.0;
      for ( index=0 ; index < arg_order ; index++)
        ftemp += 0.54-0.46*cos(2*M_PI*(float)index/(M));
      /* Normalize gain of FIR filter to be independent of number of taps */
      my_gain = arg_gain * 4.94 / ftemp;
    }
    else
      my_gain = arg_gain;
    arg = (2*M_PI*arg_center_freq / (float)TVinSampFreq);
    //printf ("total of all taps %f arg %f gain %f\n", ftemp, arg, my_gain);
    for ( index=0 ; index < arg_order ; index++) {
      ftemp = index;
      temptaps[index] = VrComplex(my_gain*cos(arg*ftemp)
                                  * (0.54-0.46*cos(2*M_PI*ftemp /M) ),
                                  -my_gain*sin(arg*ftemp)
                                  * (0.54-0.46*cos(2*M_PI*ftemp/M) ) );
    }
    //phase_corr_incr_re = (TAPTYPE) (cos(arg*decimation) * (1 << SHIFT_INC));
    //phase_corr_incr_im = (TAPTYPE) (-sin(arg*decimation) * (1 << SHIFT_INC));
    for ( index=0 ; index < arg_order ; index++) {
      arg_taps[2 * index] = (TAPTYPE) (real(temptaps[index]) * (1 << SHIFT_INC));
      arg_taps[2 * index + 1] = (TAPTYPE) (imag(temptaps[index]) * (1 << SHIFT_INC));
    }
  }
#if defined (ENABLE_MMX)
  *arg_mmxtaps=new mmxTaps(temptaps,arg_order);
#endif
}

template<class iType>
VrTV<iType>::VrTV(int c,int t,int dec,int freq, float g, int arg_audio_decimate, int arg_audio_taps)
  :VrSigProc<iType,complex>(2),numTaps(t),cutoff(c),center_freq(freq),gain(g),decimation(dec)
{
  audio_decimate = arg_audio_decimate;
  audio_taps = arg_audio_taps;
  current_decimation = decimation;
}

void init_application()
{
  if (!master_application) {
    master_application = new QApplication(qtemp_argc, qtemp_argv);
    master_widget = new QWidget();
    master_widget->setMinimumSize(570, 590);
    master_layout = new QVBoxLayout(master_widget);
    master_application->setMainWidget(master_widget);
    master_widget->resize(570, 640);
  }
}

void begin_application()
{
  if (!qtemp_started) {
    qtemp_started = 1;
    master_widget->show();
    master_application->exit_loop(); /* tell exec() below not to stay forever */
    master_application->exec();      /* start up windowing package */
    master_application->processEvents(0); /* we need to call this to finish initialization! */
  }
}
/* Initialize any variables needed by the module.
 */
template<class iType>
void VrTV<iType>::initialize()
{
  int shmMajorv;
  int shmMinorv;
  int shmSharedPixmaps;
  int i;
  QPainter *master_painter;
  float M, arg;

  init_application();
  taps = new TAPTYPE[2 * MAX_TAPS];
  synctaps = new TAPTYPE[2 * MAX_TAPS];
  setHistory(SYNC_ORDER);
  calculate_taps(synctaps, &mmxsynctaps, SYNC_ORDER, center_freq, 1, gain); /* calculate first */
  calculate_taps(taps, &mmxtaps, numTaps, center_freq, 1, gain);
  dc_base = 1;
  video_shift_amount = 7;

  /* Initialize the audio filter */
  audio_phase_correction = VrComplex(1,0);
  audio_center_freq = AUDIO_CENTER;
  audio_gain = 2.0;
  M = audio_taps-1; /* filter Order */
  arg = 2*M_PI*audio_center_freq / (float)TVinSampFreq;
  audio_tap_values = new VrComplex[audio_taps];
  for ( i=0 ; i < audio_taps ; i++)
    audio_tap_values[i] = VrComplex(audio_gain*cos(arg*i)*(0.54-0.46*cos(2*M_PI*i/(M))),
                                    audio_gain*(-1)*sin(arg*i)*(0.54-0.46*cos(2*M_PI*i/(M))));
  audio_phase_corr_incr = VrComplex(cos(arg*(float)audio_decimate),
                                    (-1)*sin(arg*(float)audio_decimate));
  tapincrement = numTaps;
#if defined (ENABLE_MMX)
  audio_processedTaps=new mmxTaps(audio_tap_values,audio_taps);
  mmxvideo = mmxtaps;
#endif
  tapstart = taps;
  tapend = &taps[numTaps*2];

  /* Initialize the X window interface for (if possible) displaying
   * images using the shared memory X interface. Note that this interface
   * runs correctly on Linux x86 machines, but does not appear to run correctly
   * on Linux Alpha.
   */
  master_painter = new QPainter(master_widget);
  my_local_hd = master_painter->get_hd();
  my_local_dpy = master_painter->get_dpy();
  noShm = XDisplayString(my_local_dpy)[0] != ':'
    || !XShmQueryVersion(my_local_dpy, &shmMajorv, &shmMinorv, &shmSharedPixmaps);
  printf ("do we not have shared memory ? shm = %d\n", noShm);
  //noShm = 1; /* no shared memory present */
  if (!noShm) {
    my_local_ximage =
      XShmCreateImage(my_local_dpy, (Visual *)NULL,
                      8 /* depth */, ZPixmap, 0, &shmInfo, MY_LINE_SIZE, MY_HEIGHT);
    shmInfo.shmid = shmget(IPC_PRIVATE, MY_LINE_SIZE * MY_HEIGHT, IPC_CREAT | 0777);
    local_newbits = (uchar *) shmat (shmInfo.shmid, (char *)NULL, 0);
    shmInfo.shmaddr = (char *)local_newbits;
    shmInfo.readOnly = False;
    XShmAttach(my_local_dpy, &shmInfo);
    shmctl(shmInfo.shmid, IPC_RMID, 0);
  }
  else {
    my_local_ximage =
      XCreateImage(my_local_dpy, (Visual *)NULL,
                   8 /* depth */, ZPixmap, 0, 0, MY_LINE_SIZE, MY_HEIGHT, 32, MY_LINE_SIZE);
    local_newbits = (uchar *)malloc(MY_LINE_SIZE * MY_HEIGHT);
  }
  my_local_ximage->data = (char *)local_newbits;
  for ( i=0; i<256; i++ ) { // allocate colors
    QColor c( i, i, i );
    pix[i] = c.pixel();
  }
  shm_put();
}

/* Update the X window video image using stored data.
 */
void shm_put()
{
  register uchar *p;
  int i;

  p = local_newbits;
  i = MY_LINE_SIZE * MY_HEIGHT;
  while (i-- > 0) {
    *p = pix[*p];
    p++;
  }
  if (!noShm)
    XShmPutImage(my_local_dpy, my_local_hd, qt_xget_readonly_gc(), my_local_ximage,
                 0, 0, 50, 250, my_local_ximage->width, my_local_ximage->height, 0);
  else
    XPutImage(my_local_dpy, my_local_hd, qt_xget_readonly_gc(), my_local_ximage,
              0, 0, 50, 250, my_local_ximage->width, my_local_ximage->height);
  XFlush(my_local_dpy);
}

/* Free the shared memory used by the X window interface. This function
 * is never called and appears not to be necessary. (No memory leaks
 * occur even though it is not used.)
 */
void shm_stop()
{
  if (shmInfo.shmid >= 0) {
    XShmDetach(my_local_dpy, &shmInfo);
    shmInfo.shmid = -1;
  }
}

/* Delete any variables used by the module.
 */
template<class iType>
VrTV<iType>::~VrTV()
{
  delete [] taps;
  delete [] synctaps;
  delete [] audio_tap_values;
}
#endif
