Poly
Poly
with Jupyter
ESIEE-PARIS
2
Page 2/255
Contents
5 Transfer function 59
5.1 The Plancherel relation . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 59
5.2 Consequences . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 60
3
4
7 Filtering 65
Page 4/255
5
Page 5/255
6
Page 6/255
7
Page 7/255
8
Page 8/255
A basic introduction to signals and systems
1
1.1 Effects of delays and scaling on signals
In this simple exercise, we recall the effect of delays and scaling on signals. It is important for students to
experiment with these operations to ensure that they master these simple transformations.
# Define a simple fu n c t i o n
def f ( t ) :
r e t u r n np . exp ( −0.25∗ t ) i f t >0 e l s e 0
# Time grid and output buffer for the delay/scaling experiments.
T = np.linspace(-10, 20, 200)
L = len(T)
x = np.zeros(L)   # reserve some space for x
t0 = 0; a = 1     # initial values
# Compute x as f(a*t + t0), sample by sample on the grid T
for k, t in enumerate(T):
    x[k] = f(a * t + t0)
# Experiment w i t h s e v e r a l v a l u e s o f a and t 0 :
# a =1 t 0 =0
# a =1 t 0 =+5 ( advance )
# a =1 t 0 =−5 ( delay )
# a=−1 t 0 =0 ( time r e v e r s e )
# a=−1 t 0 =5 ( time r e v e r s e + advance )
# a=−1 t 0 =−5 (...)
This shows how to run several tests automatically and plot all the results together.
9
10 CHAPTER 1. A BASIC INTRODUCTION TO SIGNALS AND SYSTEMS
def compute_x(a, t0):
    """Evaluate x(t) = f(a*t + t0) on the time grid T.

    NOTE(review): fills and returns the module-level array ``x`` in place —
    assumes ``T``, ``x`` and ``f`` are defined at module level.

    Parameters
    ----------
    a : float
        Time-scaling factor (a < 0 reverses time).
    t0 : float
        Time shift (t0 > 0 advances the signal, t0 < 0 delays it).

    Returns
    -------
    ndarray
        The (shared) array x with x[k] = f(a*T[k] + t0).
    """
    for k, t in enumerate(T):
        x[k] = f(a * t + t0)
    return x
l i s t _ t e s t s = [ ( 1 , 0 ) , ( 1 , 5 ) , ( 1 , −5) ] # , ( − 1 , 0 ) , ( − 1 , 3 ) , ( −1 , −3) ]
for ( a , t0 ) in l i s t _ t e s t s :
x= compute_x ( a , t 0 )
p l t . p l o t ( T , x , l a b e l = " a = { } , t 0 ={} " . f o r m a t ( a , t 0 ) )
def f ( t ) :
o u t =np . z e r o s ( l e n ( t ) )
t p o s =np . where ( t > 0)
o u t [ t p o s ] = np . exp ( −0.25∗ t [ t p o s ] )
r e t u r n out
t = np . l i n s p a c e ( − 1 0 , 2 0 , 2 0 0 )
L= l e n ( t )
x=np . z e r o s ( L )
d e f compute_xx ( t 0 , a ) :
x= f ( a ∗ t + t 0 )
Page 10/255
1.1. EFFECTS OF DELAYS AND SCALING ON SIGNALS 11
plt . plot ( t , x)
p l t . a x i s ( [ −10 , 2 0 , 0 , 1 ] )
Page 11/255
12 CHAPTER 1. A BASIC INTRODUCTION TO SIGNALS AND SYSTEMS
• Section ??
• Section 1.2.2
• Section ??
r [ L :M]=1
#
p l t . stem ( r )
_= p l t . y l i m ( [ 0 , 1 . 2 ] )
def op1(signal):
    """First-difference filter: y[t] = x[t] - x[t-1].

    At t = 0 the index t-1 is -1, which (NumPy semantics) wraps to the LAST
    sample — i.e. the difference is circular at the boundary.

    Parameters
    ----------
    signal : array_like
        Input signal.

    Returns
    -------
    ndarray
        Filtered signal, same length as the input.
    """
    transformed_signal = np.zeros(np.size(signal))
    for t in np.arange(np.size(signal)):
        transformed_signal[t] = signal[t] - signal[t - 1]
    return transformed_signal
def op2(signal):
    """Two-tap moving average: y[t] = 0.5*x[t] + 0.5*x[t-1].

    At t = 0 the index t-1 wraps to the last sample (NumPy negative indexing),
    so the average is circular at the boundary.

    Parameters
    ----------
    signal : array_like
        Input signal.

    Returns
    -------
    ndarray
        Filtered signal, same length as the input.
    """
    transformed_signal = np.zeros(np.size(signal))
    for t in np.arange(np.size(signal)):
        transformed_signal[t] = 0.5 * signal[t] + 0.5 * signal[t - 1]
    return transformed_signal
Page 12/255
1.2. A BASIC INTRODUCTION TO FILTERING 13
plt . figure ()
p l t . s t e m ( op1 ( r ) )
_= p l t . y l i m ( [ − 1 . 2 , 1 . 2 ] )
p l t . t i t l e ( " F i l t e r i n g o f r e c t a n g u l a r s i g n a l w i t h op1 " )
plt . figure ()
p l t . s t e m ( op2 ( r ) , ’ r ’ )
_= p l t . y l i m ( [ − 0 . 2 , 1 . 2 ] )
p l t . t i t l e ( " F i l t e r i n g o f r e c t a n g u l a r s i g n a l w i t h op2 " )
---------------------------------------------------------------------------
<ipython-input-4-e2dbae2047ad> in <module>()
4 plt.title("Filtering of rectangular signal with op1")
5 plt.figure()
----> 6 plt.stem(op2(r),’r’)
7 _=plt.ylim([-0.2, 1.2])
8 plt.title("Filtering of rectangular signal with op2")
/usr/local/lib/python3.5/site-packages/matplotlib/pyplot.py in stem(linefmt
2924 *args, linefmt=linefmt, markerfmt=markerfmt, basefmt=basefmt,
2925 bottom=bottom, label=label, **({"data": data} if data is not
-> 2926 None else {}))
2927
2928
/usr/local/lib/python3.5/site-packages/matplotlib/__init__.py in inner(ax,
1808 "the Matplotlib list!)" % (label_namer, func.__
1809 RuntimeWarning, stacklevel=2)
-> 1810 return func(ax, *args, **kwargs)
1811
1812 inner.__doc__ = _add_data_doc(inner.__doc__,
/usr/local/lib/python3.5/site-packages/matplotlib/axes/_axes.py in stem(sel
2625 else:
2626 x = y
-> 2627 y = np.asarray(args[0], dtype=float)
2628 args = args[1:]
2629
/usr/local/lib/python3.5/site-packages/numpy/core/numeric.py in asarray(a,
499
500 """
Page 13/255
14 CHAPTER 1. A BASIC INTRODUCTION TO SIGNALS AND SYSTEMS
We define a sine wave and check that the operation implemented by “op1” seems to be a derivative. . .
t =np . l i n s p a c e ( 0 , 1 0 0 , 5 0 0 )
Page 14/255
1.2. A BASIC INTRODUCTION TO FILTERING 15
s i g =np . s i n ( 2 ∗ p i ∗ 0 . 0 5 ∗ t )
p l t . p l o t ( t , sig , l a b e l =" I n i t i a l s i g n a l " )
p l t . p l o t ( t , 5 / ( 2 ∗ p i ∗ 0 . 0 5 ) ∗ op1 ( s i g ) , l a b e l = " F i l t e r e d s i g n a l " )
p l t . legend ( )
Composition of operations:
p l t . s t e m ( op1 ( op2 ( r ) ) , ’ r ’ )
_= p l t . y l i m ( [ − 1 . 2 , 1 . 2 ] )
---------------------------------------------------------------------------
<ipython-input-6-96c17cc23118> in <module>()
----> 1 plt.stem(op1(op2(r)),’r’)
2 _=plt.ylim([-1.2, 1.2])
/usr/local/lib/python3.5/site-packages/matplotlib/pyplot.py in stem(linefmt
2924 *args, linefmt=linefmt, markerfmt=markerfmt, basefmt=basefmt,
2925 bottom=bottom, label=label, **({"data": data} if data is not
-> 2926 None else {}))
2927
2928
/usr/local/lib/python3.5/site-packages/matplotlib/__init__.py in inner(ax,
1808 "the Matplotlib list!)" % (label_namer, func.__
1809 RuntimeWarning, stacklevel=2)
-> 1810 return func(ax, *args, **kwargs)
Page 15/255
16 CHAPTER 1. A BASIC INTRODUCTION TO SIGNALS AND SYSTEMS
1811
1812 inner.__doc__ = _add_data_doc(inner.__doc__,
/usr/local/lib/python3.5/site-packages/matplotlib/axes/_axes.py in stem(sel
2625 else:
2626 x = y
-> 2627 y = np.asarray(args[0], dtype=float)
2628 args = args[1:]
2629
/usr/local/lib/python3.5/site-packages/numpy/core/numeric.py in asarray(a,
499
500 """
--> 501 return array(a, dtype, copy=False, order=order)
502
503
def op3(signal):
    """First-order recursive (AR(1)) filter: y[t] = 0.7*y[t-1] + x[t].

    The output buffer starts at zero, so y[-1] (the wrapped last element,
    still zero when t = 0 is computed) acts as a zero initial condition.

    Parameters
    ----------
    signal : array_like
        Input signal.

    Returns
    -------
    ndarray
        Filtered signal, same length as the input.
    """
    transformed_signal = np.zeros(np.size(signal))
    for t in np.arange(np.size(signal)):
        transformed_signal[t] = 0.7 * transformed_signal[t - 1] + signal[t]
    return transformed_signal
p l t . s t e m ( op3 ( r ) , ’ r ’ )
p l t . t i t l e ( " F i l t e r i n g o f r e c t a n g u l a r s i g n a l w i t h op3 " )
Page 16/255
1.2. A BASIC INTRODUCTION TO FILTERING 17
_= p l t . y l i m ( [ − 0 . 2 , 3 . 2 ] )
---------------------------------------------------------------------------
<ipython-input-7-3cf0c745df68> in <module>()
5 return transformed_signal
6
----> 7 plt.stem(op3(r),’r’)
8 plt.title("Filtering of rectangular signal with op3")
9 _=plt.ylim([-0.2, 3.2])
/usr/local/lib/python3.5/site-packages/matplotlib/pyplot.py in stem(linefmt
2924 *args, linefmt=linefmt, markerfmt=markerfmt, basefmt=basefmt,
2925 bottom=bottom, label=label, **({"data": data} if data is not
-> 2926 None else {}))
2927
2928
/usr/local/lib/python3.5/site-packages/matplotlib/__init__.py in inner(ax,
1808 "the Matplotlib list!)" % (label_namer, func.__
1809 RuntimeWarning, stacklevel=2)
-> 1810 return func(ax, *args, **kwargs)
1811
1812 inner.__doc__ = _add_data_doc(inner.__doc__,
/usr/local/lib/python3.5/site-packages/matplotlib/axes/_axes.py in stem(sel
2625 else:
2626 x = y
-> 2627 y = np.asarray(args[0], dtype=float)
2628 args = args[1:]
2629
/usr/local/lib/python3.5/site-packages/numpy/core/numeric.py in asarray(a,
499
500 """
--> 501 return array(a, dtype, copy=False, order=order)
502
503
Page 17/255
18 CHAPTER 1. A BASIC INTRODUCTION TO SIGNALS AND SYSTEMS
A curiosity
def op4(signal):
    """Accumulator (discrete integrator): y[t] = y[t-1] + x[t].

    The output buffer starts at zero, so the running sum starts from a zero
    initial condition; this is the inverse of the first-difference op1.

    Parameters
    ----------
    signal : array_like
        Input signal.

    Returns
    -------
    ndarray
        Cumulative sum of the input, same length as the input.
    """
    transformed_signal = np.zeros(np.size(signal))
    for t in np.arange(np.size(signal)):
        transformed_signal[t] = 1 * transformed_signal[t - 1] + signal[t]
    return transformed_signal
p l t . s t e m ( op4 ( r ) , ’ r ’ )
p l t . t i t l e ( " F i l t e r i n g o f r e c t a n g u l a r s i g n a l w i t h op4 " )
_= p l t . y l i m ( [ − 0 . 2 , 5 . 6 ] )
# And t h e n . .
plt . figure ()
p l t . s t e m ( op1 ( op4 ( r ) ) , ’ r ’ )
p l t . t i t l e ( " F i l t e r i n g o f r e c t a n g u l a r s i g n a l w i t h op1 ( op4 ) " )
_= p l t . y l i m ( [ − 0 . 2 , 1 . 2 ] )
---------------------------------------------------------------------------
<ipython-input-8-46cc0d896a24> in <module>()
5 return transformed_signal
6
----> 7 plt.stem(op4(r),’r’)
8 plt.title("Filtering of rectangular signal with op4")
9 _=plt.ylim([-0.2, 5.6])
Page 18/255
1.2. A BASIC INTRODUCTION TO FILTERING 19
/usr/local/lib/python3.5/site-packages/matplotlib/pyplot.py in stem(linefmt
2924 *args, linefmt=linefmt, markerfmt=markerfmt, basefmt=basefmt,
2925 bottom=bottom, label=label, **({"data": data} if data is not
-> 2926 None else {}))
2927
2928
/usr/local/lib/python3.5/site-packages/matplotlib/__init__.py in inner(ax,
1808 "the Matplotlib list!)" % (label_namer, func.__
1809 RuntimeWarning, stacklevel=2)
-> 1810 return func(ax, *args, **kwargs)
1811
1812 inner.__doc__ = _add_data_doc(inner.__doc__,
/usr/local/lib/python3.5/site-packages/matplotlib/axes/_axes.py in stem(sel
2625 else:
2626 x = y
-> 2627 y = np.asarray(args[0], dtype=float)
2628 args = args[1:]
2629
/usr/local/lib/python3.5/site-packages/numpy/core/numeric.py in asarray(a,
499
500 """
--> 501 return array(a, dtype, copy=False, order=order)
502
503
1.2.2 Filters
Definition A filter is a time-invariant linear system.
• Time invariance means that if y(n) is the response associated with an input x(n), then y(n − n0 ) is the
response associated with the input x(n − n0 ).
• Linearity means that if y1 (n) and y2 (n) are the outputs associated with x1 (n) and x2 (n), then the output
associated with a1 x1 (n) + a2 x2 (n) is a1 y1 (n) + a2 y2 (n) (superposition principle)
• x(n) → 2x(n)
Page 19/255
20 CHAPTER 1. A BASIC INTRODUCTION TO SIGNALS AND SYSTEMS
• x(n) → 2x(n) + 1
• x(n) → x(n)2
Definition 2. The impulse response of a system is nothing but the output of the system excited by a Dirac
impulse. It is often denoted h(n).
δ (n) → System → h(n)
def dirac(n):
    """Discrete Dirac (unit impulse): 1 at n == 0, 0 elsewhere.

    Parameters
    ----------
    n : int
        Sample index.

    Returns
    -------
    int
        1 if n == 0, else 0.
    """
    return 1 if n == 0 else 0
def dirac_vector(N):
    """Length-N unit impulse: [1, 0, ..., 0].

    Parameters
    ----------
    N : int
        Length of the vector.

    Returns
    -------
    ndarray
        Zeros of length N with a 1 in the first position (empty array if N == 0).
    """
    out = np.zeros(N)
    if N > 0:          # guard: out[0] would raise IndexError on an empty array
        out[0] = 1
    return out
d= d i r a c _ v e c t o r ( 2 0 )
f i g , ax = p l t . s u b p l o t s ( 2 , 2 , s h a r e x = T r u e )
Page 20/255
1.2. A BASIC INTRODUCTION TO FILTERING 21
Curiosity (continued)
The impulse response of op4(op1) is given by
h=op4 ( op1 ( d i r a c _ v e c t o r ( 2 0 ) ) )
p l t . stem ( h , l a b e l =" F i l t e r 4 ( 1 ) " )
_= p l t . a x i s ( [ − 5 , 2 0 , 0 , 1 . 2 ] )
This is nothing but a Dirac impulse! We already observed that op4(op1(signal))=signal; that is the filter
is an identity transformation. In other words, op4 acts as the “inverse” of op1. Finally, we note that the
impulse response of the identity filter is a Dirac impulse.
Page 21/255
22 CHAPTER 1. A BASIC INTRODUCTION TO SIGNALS AND SYSTEMS
Page 22/255
Introduction to the Fourier representation
2
We begin by a simple example which shows that the addition of some sine waves, with special coefficients,
converges constructively. We then explain that any periodic signal can be expressed as a sum of sine waves.
This is the notion of Fourier series. After an illustration (denoising of a corrupted signal) which introduces
a notion of filtering in the frequency domain, we show how the Fourier representation can be extended to
aperiodic signals.
• Section ??
• Section ??
• Section ??
• Section ??
• Section ??
• Section ??
N = 100
L = 20
s = np . z e r o s (N − 1 )
f o r k i n np . a r a n g e ( 1 , 3 0 0 , 2 ) :
s = s + 1 / f l o a t ( k ) ∗ s i n ( 2 ∗ p i ∗ k / L ∗ np . a r a n g e ( 1 , N, 1 ) )
plt . plot ( s )
p l t . t i t l e ( "Somme a v e c " + s t r ( ( k − 1 ) / 2 + 1 ) + " t e r m e s " )
The next example is more involved in that it sums sines and cosines of different frequencies and with different
amplitudes. We also add widgets (sliders) which make it easier to interact with the program.
23
24 CHAPTER 2. INTRODUCTION TO THE FOURIER REPRESENTATION
@out.capture(clear_output=True, wait=True)
def sfou_exp(Km):
    """Plot the partial Fourier sum with Km terms (odd sine harmonics of 1/k).

    NOTE(review): relies on the module-level widget output ``out`` and on
    ``cos``/``sin``/``pi`` imported at module level (pylab style).
    """
    # clear_output(wait=True)
    Kmax = int(Km)
    L = 400
    N = 1000
    k = 0
    s = np.zeros(N - 1)
    # plt.clf()
    for k in np.arange(1, Kmax):
        ak = 0
        bk = 1.0 / k if (k % 2) == 1 else 0  # odd harmonics only
        # ak = 0  # if (k % 2) == 1 else -2.0 / (pi * k ** 2)
        # bk = -1.0 / k if (k % 2) == 1 else 1.0 / k
        s = s + ak * cos(2 * pi * k / L * np.arange(1, N, 1)) + bk * sin(
            2 * pi * k / L * np.arange(1, N, 1))
    ax = plt.axes(xlim=(0, N), ylim=(-2, 2))
    ax.plot(s)
    plt.title("Sum with {} terms".format(k + 1))
    plt.show()
# ## −−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−
---------------------------------------------------------------------------
<ipython-input-3-67dbb1757edc> in <module>()
----> 1 @out.capture(clear_output=True, wait=True)
Page 24/255
2.1. SIMPLE EXAMPLES 25
2 def sfou_exp(Km):
3 #clear_output(wait=True)
4 Kmax = int(Km)
5 L = 400
# −−−− W i d g e t s −−−−−−−−−−−−−−−−−−−−−−−
# s l i d e r = w i d g e t s . F l o a t S l i d e r ( max =100 , min =0 , s t e p =1 , v a l u e =1 )
s l i d e = w i d g e t s . I n t S l i d e r ( max =100 , min =0 , s t e p =1 , v a l u e = 5 )
val = widgets . IntText ( value= ’1 ’ )
#−−−−− C a l l b a c k s d e s w i d g e t s −−−−−−−−−−−−−
@out.capture(clear_output=True, wait=True)
def sfou_exp(Km):
    """Plot the partial Fourier sum with Km terms (odd sine harmonics of 1/k).

    NOTE(review): relies on the module-level widget output ``out`` and on
    ``cos``/``sin``/``pi`` imported at module level (pylab style).
    """
    # clear_output(wait=True)
    Kmax = int(Km)
    L = 400
    N = 1000
    k = 0
    s = np.zeros(N - 1)
    # plt.clf()
    for k in np.arange(1, Kmax):
        ak = 0
        bk = 1.0 / k if (k % 2) == 1 else 0  # odd harmonics only
        # ak = 0  # if (k % 2) == 1 else -2.0 / (pi * k ** 2)
        # bk = -1.0 / k if (k % 2) == 1 else 1.0 / k
        s = s + ak * cos(2 * pi * k / L * np.arange(1, N, 1)) + bk * sin(
            2 * pi * k / L * np.arange(1, N, 1))
    ax = plt.axes(xlim=(0, N), ylim=(-2, 2))
    ax.plot(s)
    plt.title("Sum with {} terms".format(k + 1))
    plt.show()
# ## −−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−
#@out . c a p t u r e ( c l e a r _ o u t p u t = True , w a i t = T r u e )
def sfou1_Km(param):
    """Slider callback: mirror the new value into the text widget and redraw.

    NOTE(review): uses the module-level widgets ``val`` and the plotting
    function ``sfou_exp``.
    """
    Km = param['new']
    val.value = str(Km)
    sfou_exp(Km)
#@out . c a p t u r e ( c l e a r _ o u t p u t = True , w a i t = T r u e )
def sfou2_Km(param):
    """Text-widget callback: mirror the new value into the slider.

    NOTE(review): uses the module-level widget ``slide``; the redraw is
    left to the slider's own observer.
    """
    Km = param.new
    slide.value = Km
    # sfou_exp(Km.value)
# s f o u _ e x p (Km. v a l u e )
# −−−− D i s p l a y −−−−−−−−−−−−−−−−−
# display ( slide )
# display ( val )
s l i d e . o b s e r v e ( sfou1_Km , names = [ ’ v a l u e ’ ] )
sfou_exp (5)
# v a l . o b s e r v e ( sfou2_Km , names = ’ v a l u e ’ )
d i s p l a y ( w i d g e t s . VBox ( [ s l i d e , o u t ] ) )
f , ax = s u b p l o t s ( 1 , 1 , f i g s i z e = ( 4 , 4 ) )
ax . s e t _ x l i m ( [ − 1 , 3 ] )
ax . s e t _ y l i m ( [ − 1 , 3 ] )
ax . s p i n e s [ ’ r i g h t ’ ] . s e t _ c o l o r ( ’ none ’ )
ax . s p i n e s [ ’ t o p ’ ] . s e t _ c o l o r ( ’ none ’ )
# ax . s p i n e s [ ’ b o t t o m ’ ] . s e t _ p o s i t i o n ( ’ c e n t e r ’ )
Page 26/255
2.2. DECOMPOSITION OF PERIODIC FUNCTIONS – FOURIER SERIES 27
ax . q u i v e r (
0 , 0 , z [ 0 ] , z [ 1 ] , a n g l e s = ’ xy ’ , s c a l e _ u n i t s = ’ xy ’ , s c a l e =1 , c o l o r = ’ g r e e n ’)
ax . q u i v e r (
0 , 0 , u [ 0 ] , u [ 1 ] , a n g l e s = ’ xy ’ , s c a l e _ u n i t s = ’ xy ’ , s c a l e =1 , c o l o r = ’ b l a c k ’)
ax . q u i v e r (
0 , 0 , v [ 0 ] , v [ 1 ] , a n g l e s = ’ xy ’ , s c a l e _ u n i t s = ’ xy ’ , s c a l e =1 , c o l o r = ’ b l a c k ’)
ax . q u i v e r (
0 , 0 , u1 [ 0 ] , u1 [ 1 ] , a n g l e s = ’ xy ’ , s c a l e _ u n i t s = ’ xy ’ , s c a l e =1 , c o l o r = ’ r e d ’)
ax . q u i v e r (
0 , 0 , v1 [ 0 ] , v1 [ 1 ] , a n g l e s = ’ xy ’ , s c a l e _ u n i t s = ’ xy ’ , s c a l e =1 , c o l o r = ’ r e d ’)
ax . x a x i s . s e t _ t i c k s _ p o s i t i o n ( ’ b o t t o m ’ )
ax . y a x i s . s e t _ t i c k s _ p o s i t i o n ( ’ l e f t ’ )
From a coordinate system to another: Take a vector (in green in the illustration). Its coordinates in the
system (u, v) are [1,2]. In order to obtain the coordinates in the new system (O, u1 , v1 ), we have to project
the vector on u1 and u2 . This is done by the scalar products:
x = z . d o t ( u1 )
y = z . d o t ( v1 )
p r i n t ( ’New c o o r d i n a t e s : ’ , x , y )
Page 27/255
28 CHAPTER 2. INTRODUCTION TO THE FOURIER REPRESENTATION
L = 200
k = 8
l = 3
s k = s q r t ( 2 / L ) ∗ c o s ( 2 ∗ p i / L ∗ k ∗ np . a r a n g e ( 0 , L ) )
s l = s q r t ( 2 / L ) ∗ c o s ( 2 ∗ p i / L ∗ l ∗ np . a r a n g e ( 0 , L ) )
s l . dot ( s l )
1.0000000000000004
1.9999999999999998
Therefore, the decomposition of any even periodic function x(n) with period L on the basis of cosines
expresses as √ ( )
2 a0 L−1
x(n) = + ∑ ak cos(2π k/Ln)
L 2 k=1
with
√
2
ak = ∑ x(n) cos(2π k/Ln).
L n∈[L]
with
2
ak = ∑ x(n) cos(2π k/Ln),
L n∈[L]
where the notation n ∈ [L] indicates that the sum has to be done on any length-L interval. The very same
reasoning can be done for odd functions, which introduces a decomposition into sine waves:
L−1
xodd (n) = ∑ bk sin(2π k/Ln)
k=0
with
2
bk = ∑ x(n) sin(2π k/Ln),
L n∈[L]
Page 28/255
2.3. COMPLEX FOURIER SERIES 29
+∞
a0 L−1
x(n) = + ∑ ak cos(2π k/Ln) + ∑ bk sin(2π k/Ln)
2 k=1 k=1
with
{
ak = L2 ∑n∈[L] x(n) cos(2π k/Ln),
bk = L2 ∑n∈[L] x(n) sin(2π k/Ln),
This is the definition of the Fourier series, and this is no more complicated than that. . . A remaining question
is the question of convergence. That is, does the series converge to the true function? The short answer is
Yes: the equality in the series expansion is a true equality, not an approximation. This is a bit out of scope
for this course, but you may have a look at this article.
There of course exists a continuous version, valid for time-continuous signals.
L−1
∑ ck e j2π
kn
x(n) = L
k=0
1
∑ x(n)e− j2π L
kn
with ck =
L n∈[L]
where ck is the dot product between x(n) and exp( j2π k/Ln), i.e. the ‘coordinate’ of x with respect to the
‘vector’ exp( j2π k/Ln). This is nothing but the definition of the complex Fourier series.
Exercise – Show that ck is periodic with period L; i.e. ck = ck+L .
Since ck is periodic in k with period L, we see that in terms of the “normalized frequency” k/L, it is periodic
with period 1.
Page 29/255
30 CHAPTER 2. INTRODUCTION TO THE FOURIER REPRESENTATION
Relation of the complex Fourier Series with the standard Fourier Series
It is easy to find a relation between this complex Fourier series and the classical Fourier series. The
series can be rewritten as
+∞
x(n) = c0 + ∑ ck e j2π k/Ln + c−k e− j2π k/Ln .
k=1
By using the Euler formulas, developping and rearranging, we get
+∞
x(n) = c0 + ∑ R {ck + c−k } cos(2π k/Ln) + I {c−k − ck } sin(2π k/Ln) (2.1)
k=1
+ j (R {ck − c−k } sin(2π k/Ln) + I {ck + c−k } cos(2π k/Ln)) . (2.2)
and, by the cancellation of the imaginary part, the following symmetry relationships for real signals:
{
R {ck } = R {c−k }
I {ck } = −I {c−k } .
%m a t p l o t l i b i n l i n e
L = 400
N = 500
t = np . a r a n g e (N)
s = s in (2 ∗ pi ∗ 3 ∗ t / L + pi / 4)
x = [ s s i f s s > −0.2 e l s e −0.2 f o r s s i n s ]
plt . plot ( t , x)
[<matplotlib.lines.Line2D at 0x7f17d7e863c8>]
Page 30/255
2.3. COMPLEX FOURIER SERIES 31
for k in karray :
r e s . a p p e n d ( np . v d o t ( exp ( 1 j ∗ 2 ∗ p i / L ∗ k ∗ np . a r a n g e ( 0 , L ) ) , x ) )
r e t u r n 1 / L ∗ np . a r r a y ( r e s )
Text(0.5, 0, ’Time’)
plt . figure ()
kk = np . a r a n g e ( −50 , 5 0 )
c = c o e f f c k ( x [ 0 : L ] , L , kk )
p l t . s t e m ( kk , np . a b s ( c ) )
p l t . t i t l e ( " F o u r i e r s e r i e s c o e f f i c i e n t s ( modulus ) " )
plt . xlabel ( "k" )
msg = " " " I n t h e f r e q u e n c y r e p r e s e n t a t i o n , t h e x a x i s c o r r e s p o n d s t o t h e
frequencies k /L
o f t h e complex e x p o n e n t i a l s .
Page 31/255
32 CHAPTER 2. INTRODUCTION TO THE FOURIER REPRESENTATION
T h e r e f o r e , i f a s i g n a l i s p e r i o d i c o f p e r i o d M, t h e c o r r e s p o n d i n g
fundamental frequency
i s 1 /M. T h i s f r e q u e n c y t h e n a p p e a r s a t i n d e x ko=L /M ( i f t h i s r a t i o i s an
integer ) .
H a r m o n i c s w i l l a p p e a r a t m u l t i p l e s o f ko . " " "
p r i n t ( msg )
# d e f i n e a p u l s e t r a i n which w i l l c o r r u p t o u r o r i g i n a l s i g n a l
def sign(x):
    """Signum with the convention sign(0) = +1.

    Parameters
    ----------
    x : int, float, or iterable of numbers
        Scalar or sequence of values.

    Returns
    -------
    int or ndarray
        +1 where x >= 0 and -1 where x < 0 (scalar in, scalar out).
    """
    if isinstance(x, (int, float)):
        return 1 if x >= 0 else -1
    else:
        return np.array([1 if u >= 0 else -1 for u in x])
# t e s t : s i g n ( [ 2 , 1 , −0.2 , 0 ] )
def repeat (x , n ) :
i f i s i n s t a n c e ( x , ( np . n d a r r a y , l i s t , i n t , f l o a t ) ) :
r e t u r n np . a r r a y ( [ l i s t ( x ) ∗ n ] ) . f l a t t e n ( )
Page 32/255
2.3. COMPLEX FOURIER SERIES 33
else :
r a i s e ( ’ i n p u t must be an a r r a y , l i s t , o r f l o a t / i n t ’ )
# t =np . a r a n g e (N)
# s i g = s i g n ( s i n ( 2 ∗ p i ∗10∗ t / L ) )
r e c t = np . c o n c a t e n a t e ( ( np . o n e s ( 2 0 ) , −np . o n e s ( 2 0 ) ) )
# [1 ,1 ,1 ,1 ,1 , −1 , −1 , −1 , −1 , −1]
(1.1, -1.1)
Text(0.5, 0, ’k’)
The fundamental frequency of the pulse train is 1 over the length of the pulse, that is 1/40 here. Since
The Fourier series is computed on a length L=400, the harmonics appear every 10 samples (ie at indexes k
multiples of 10).
Page 33/255
34 CHAPTER 2. INTRODUCTION TO THE FOURIER REPRESENTATION
Page 34/255
2.3. COMPLEX FOURIER SERIES 35
z = x + 1 ∗ sig
plt . plot (z)
p l t . t i t l e ( " Corrupted signal " )
kk = np . a r a n g e ( −200 , 2 0 0 )
c z = c o e f f c k ( z [ 0 : L ] , L , kk )
plt . figure ()
p l t . s t e m ( kk , np . a b s ( c z ) )
p l t . t i t l e ( " F o u r i e r s e r i e s c o e f f i c i e n t s ( modulus ) " )
plt . xlabel ( "k" )
Text(0.5, 0, ’k’)
Now, we try to kill all the frequencies harmonics of 10 (the fundamental frequency of the pulse train),
and reconstruct the resulting signal. . .
# k i l l f r e q u e n c i e s h a r m o n i c s o f 10 ( t h e f u n d a m e n t a l f r e q u e n c y o f t h e p u l s e
train )
# and r e c o n s t r u c t t h e r e s u l t i n g s i g n a l
s = np . z e r o s (N)
kmin = np . min ( kk )
f o r k i n kk :
i f n o t k % 1 0 : # t r u e i f k i s m u l t i p l e o f 10
s = s + c z [ k + kmin ] ∗ exp ( 1 j ∗ 2 ∗ p i / L ∗ k ∗ np . a r a n g e ( 0 , N) )
plt . figure ()
p l t . p l o t ( t , np . r e a l ( s ) )
p l t . t i t l e ( " r e c o n s t r u c t i o n by F o u r i e r s e r i e s " )
p l t . x l a b e l ( " Time " )
plt . figure ()
plt . p l o t ( t , z − np . r e a l ( s ) )
plt . t i t l e ( " r e c o n s t r u c t i o n by F o u r i e r s e r i e s " )
plt . x l a b e l ( " Time " )
Page 35/255
36 CHAPTER 2. INTRODUCTION TO THE FOURIER REPRESENTATION
Text(0.5, 0, ’Time’)
Page 36/255
2.3. COMPLEX FOURIER SERIES 37
Page 37/255
38 CHAPTER 2. INTRODUCTION TO THE FOURIER REPRESENTATION
Page 38/255
From Fourier Series to Fourier transforms
3
In this section, we go from the Fourier series to the Fourier transform for discrete signal. So doing, we also
introduce the notion of Discrete Fourier Transform that we will study in more details later. For now, we
focus on the representations in the frequency domain, detail and experiment with some examples.
N−1
∑ X(k)e j2π
kn
x(n) = N
k=0
N−1
1
∑ x(n)e− j2π
kn
with X(k) = N
N n=0
• we may also consider that there is nothing –that is zeros, outside of the observation interval. In such
condition, we can still imagine that we have a periodic signal, but with an infinite period. Since the
separation of two harmonics in the Fourier series is ∆ f =1/period, we see that ∆ f → 0. Then the
Fourier representation becomes continuous. This is illustrated below.
# compute the coefficients ck
def coeffck(x, L, k):
    """Fourier-series coefficients c_k = (1/L) * sum_n x(n) exp(-j*2*pi*k*n/L).

    Parameters
    ----------
    x : array_like
        One period of the signal; must have exactly L samples.
    L : int
        Period / analysis length.
    k : int or iterable of ints
        Frequency index, or collection of indexes.

    Returns
    -------
    ndarray
        Complex coefficients, one per requested k.
    """
    assert np.size(x) == L, "input must be of length L"
    karray = []
    res = []
    if isinstance(k, int):
        karray.append(k)
    else:
        karray = np.array(k)
    for kk in karray:
        # np.vdot conjugates its FIRST argument, giving <e_k, x> = sum x(n) e^{-j2pi kn/L}
        res.append(np.vdot(np.exp(1j * 2 * np.pi / L * kk * np.arange(0, L)), x))
    return 1 / L * np.array(res)
Lpad = 20 # t h e n 2 0 0 , t h e n 2000
# define a rectangular pulse
r e c t = np . c o n c a t e n a t e ( ( np . o n e s ( 2 0 ) , −np . o n e s ( 2 0 ) ) )
# Add z e r o s a f t e r :
r e c t _ z e r o p a d d e d = np . c o n c a t e n a t e ( ( r e c t , np . z e r o s ( Lpad ) ) )
sig = rect_zeropadded
plt . plot ( sig )
# compute t h e F o u r i e r s e r i e s f o r | k / L s i g | < 1 / 4
L s i g = np . s i z e ( s i g )
fmax = i n t ( L s i g / 4 )
kk = np . a r a n g e (−fmax , fmax )
c = c o e f f c k ( s i g [ 0 : L s i g ] , L s i g , kk )
# plot it
plt . figure ()
p l t . s t e m ( kk / L s i g , np . a b s ( c ) )
p l t . t i t l e ( " F o u r i e r s e r i e s c o e f f i c i e n t s ( modulus ) " )
p l t . x l a b e l ( " N o r m a l i z e d f r e q u e n c y −− k / L s i g " )
Hence we obtain a formula where the discrete sum for reconstructing the time-series x(n) becomes a
Page 40/255
3.2. EXAMPLES 41
N−1 1−1/N
1
∑ ck e j2π N = ∑ NX(k)e j2π N
kn kn
x(n) =
k=0 k/N=0
N
∫ 1
→ x(n) = X( f )e j2π f n d f
0
Even before exploring the numerous properties of the Fourier transform, it is important to stress that
The Fourier transform of a discrete signal is periodic with period one.
Check it as an exercise! Begin with the formula for X( f ) and compute X( f + 1). Use the fact that n is an
integer and that exp( j2π n) = 1.
3.2 Examples
Exercise 2. .
• Compute the Fourier transform of a rectangular window given on N points. The result is called a
(discrete) cardinal sine (or sometimes Dirichlet kernel). Sketch a plot, and study the behaviour of this
function with N.
Page 41/255
42 CHAPTER 3. FROM FOURIER SERIES TO FOURIER TRANSFORMS
Define a sine wave, compute and plot its Fourier transform. As the FFT is actually an implementation
of a discrete Fourier transform, we will have an approximation of the true Fourier transform by using zero-
padding (check that a parameter in the fft enables to do this zero-padding).
from numpy . f f t i m p o r t f f t , ifft
# D e f i n e a r e c t a n g u l a r window , o f l e n g t h L
# on N p o i n t s , z e r o p a d t o NN=1000
# t a k e eg L=100 , N=500
NN = 1000
L = 10 # 1 0 , t h e n 6 , t h e n 2 0 , t h e n 5 0 , t h e n 1 0 0 . . .
r = np . o n e s ( L )
Rf = f f t ( r , NN)
f = f f t f r e q (NN)
p l t . p l o t ( f , np . a b s ( Rf ) )
[<matplotlib.lines.Line2D at 0x7f5d27641d68>]
It remains to compare this to a discrete cardinal sine. First we define a function and then compare the
results.
Page 42/255
3.2. EXAMPLES 43
def dsinc(x, L):
    """Discrete cardinal sine (Dirichlet kernel): sin(x) / (L*sin(x/L)).

    The removable singularity at x == 0 is set to its limit value 1.

    Parameters
    ----------
    x : int, float, or array_like
        Evaluation point(s).
    L : int
        Window length parameter.

    Returns
    -------
    ndarray
        Values of the discrete cardinal sine (always an array, even for
        scalar input).
    """
    if isinstance(x, (int, float)):
        x = [x]
    x = np.array(x)
    out = np.ones(np.shape(x))      # value 1 wherever x == 0
    I = np.where(x != 0)
    out[I] = np.sin(x[I]) / (L * np.sin(x[I] / L))
    return out
N = 1000
L = 40
f = np . l i n s p a c e ( − 0 . 5 , 0 . 5 , 4 0 0 )
p l t . p l o t ( f , dsinc ( pi ∗ L ∗ f , L) )
p l t . g r i d ( b= T r u e )
N = 1000
f = np . l i n s p a c e ( − 0 . 5 , 0 . 5 , 4 0 0 )
p l t . p l o t ( f , L ∗ np . a b s ( d s i n c ( p i ∗ L ∗ f , L ) ) )
f = f f t f r e q (NN)
p l t . p l o t ( f , np . a b s ( Rf ) )
p l t . g r i d ( b= T r u e )
Interactive versions. . .
# u s i n g %m a t p l o t l i b u s e a b a c k e n d t h a t a l l o w s e x t e r n a l f i g u r e s
# u s i n g %m a t p l o t l i b i n l i n e p l o t s t h e r e s u l t s i n t h e n o t e b o o k
%m a t p l o t l i b i n l i n e
s l i d e r = w i d g e t s . F l o a t S l i d e r ( min = 0 . 1 , max =100 , s t e p = 0 . 1 , v a l u e = 8 )
display ( slider )
#−−−−− C a l l b a c k s d e s w i d g e t s −−−−−−−−−−−−−
Page 43/255
44 CHAPTER 3. FROM FOURIER SERIES TO FOURIER TRANSFORMS
def pltsinc(change):
    """Slider callback: redraw the discrete cardinal sine for the new L.

    NOTE(review): relies on module-level ``plt``, ``clear_output``, ``pi``
    and the ``dsinc`` helper; ``change`` is an ipywidgets change dict.
    """
    L = change['new']
    plt.clf()
    clear_output(wait=True)
    # val.value = str(f)
    f = np.linspace(-0.5, 0.5, 400)
    plt.plot(f, dsinc(pi * L * f, L))
    plt.ylim([-0.3, 1.2])
    plt.grid(b=True)
p l t s i n c ( { ’ new ’ : 8 } )
s l i d e r . o b s e r v e ( p l t s i n c , names = [ ’ v a l u e ’ ] )
Page 44/255
3.2. EXAMPLES 45
This is an example with matplotlib widgets interactivity, (instead of html widgets). The docs can be
found here
%m a t p l o t l i b
from m a t p l o t l i b . w i d g e t s i m p o r t S l i d e r
f i g , ax = p l t . s u b p l o t s ( )
f i g . s u b p l o t s _ a d j u s t ( bottom =0.2 , l e f t = 0 . 1 )
l i n e , = ax . p l o t ( f , d s i n c ( p i ∗ L ∗ f , L ) , lw = 2 )
# l i n e 2 , = ax . p l o t ( f , s i n c ( p i ∗L∗ f ) , lw =2 )
# l i n e 2 i s i n o r d e r t o compare w i t h t h e " t r u e " s i n c
ax . g r i d ( b= T r u e )
# l i n e 2 . s e t _ y d a t a ( s i n c ( p i ∗L∗ f ) )
s l i d e r . on_changed ( on_change )
Page 45/255
46 CHAPTER 3. FROM FOURIER SERIES TO FOURIER TRANSFORMS
%m a t p l o t l i b i n l i n e
from numpy . f f t i m p o r t f f t , i f f t
N = 250
f0 = 0.1
NN = 1000
f i g , ax = p l t . s u b p l o t s ( 2 , 1 )
def plot_sin_and_transform(N, f0, ax):
    """Plot a length-N sine of frequency f0 and the modulus of its FFT.

    NOTE(review): the FFT is zero-padded to the module-level constant ``NN``;
    ``pi`` and ``fft`` are assumed imported at module level.

    Parameters
    ----------
    N : int
        Number of time samples.
    f0 : float
        Normalized frequency of the sine.
    ax : sequence of matplotlib axes
        ax[0] receives the time plot, ax[1] the frequency plot.
    """
    t = np.arange(N)
    s = np.sin(2 * pi * f0 * t)
    Sf = fft(s, NN)
    ax[0].plot(t, s)
    f = np.fft.fftfreq(NN)
    ax[1].plot(f, np.abs(Sf))
p l o t _ s i n _ a n d _ t r a n s f o r m (N, f0 , ax )
Interactive versions
# u s i n g %m a t p l o t l i b u s e a b a c k e n d t h a t a l l o w s e x t e r n a l f i g u r e s
# u s i n g %m a t p l o t l i b i n l i n e p l o t s t h e r e s u l t s i n t h e n o t e b o o k
%m a t p l o t l i b i n l i n e
sliderN = widgets . I n t S l i d e r (
d e s c r i p t i o n = "N" , min =1 , max =1000 , s t e p =1 , v a l u e =2 00 )
s l i d e r f 0 = widgets . F l o a t S l i d e r (
d e s c r i p t i o n = " f 0 " , min =0 , max = 0 . 5 , s t e p = 0 . 0 1 , v a l u e = 0 . 1 )
c1 = w i d g e t s . Checkbox ( d e s c r i p t i o n = " D i s p l a y t i m e s i g n a l " , v a l u e = T r u e )
c2 = w i d g e t s . Checkbox ( d e s c r i p t i o n = " D i s p l a y f r e q u e n c y s i g n a l " , v a l u e = T r u e )
# display ( sliderN )
# display ( sliderf0 )
N = 500
f0 = 0.1
t = np . a r a n g e (N)
Page 46/255
3.2. EXAMPLES 47
s = np . s i n ( 2 ∗ p i ∗ f 0 ∗ t )
S f = f f t ( s , NN)
f = np . f f t . f f t f r e q (NN)
#−−−−− C a l l b a c k s d e s w i d g e t s −−−−−−−−−−−−−
@out.capture(clear_output=True, wait=True)
def pltsin(dummy):
    """Widget callback: redraw time and/or frequency views of the sine.

    NOTE(review): reads the module-level widgets ``sliderN``, ``sliderf0``,
    ``c1``, ``c2`` and the FFT length ``NN``; the argument is ignored.
    """
    # clear_output(wait=True)
    N = sliderN.value
    f0 = sliderf0.value
    t = np.arange(N)
    s = np.sin(2 * pi * f0 * t)
    Sf = fft(s, NN)
    f = np.fft.fftfreq(NN)
    if c1.value:
        plt.figure(1)
        plt.clf()
        plt.plot(t, s)
    if c2.value:
        plt.figure(2)
        plt.clf()
        plt.plot(f, np.abs(Sf))
    plt.show()
pltsin (8)
s l i d e r N . o b s e r v e ( p l t s i n , names= ’ v a l u e ’ )
s l i d e r f 0 . o b s e r v e ( p l t s i n , names= ’ v a l u e ’ )
c1 . o b s e r v e ( p l t s i n , names= ’ v a l u e ’ )
c2 . o b s e r v e ( p l t s i n , names= ’ v a l u e ’ )
d i s p l a y ( w i d g e t s . VBox ( [ s l i d e r N , s l i d e r f 0 , c1 , c2 , o u t ] ) )
Page 47/255
48 CHAPTER 3. FROM FOURIER SERIES TO FOURIER TRANSFORMS
%m a t p l o t l i b t k
from m a t p l o t l i b . w i d g e t s i m p o r t S l i d e r
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.2, left=0.1)
ax.grid(b=True)

def on_change(f0):
    """Slider callback: recompute and redraw the spectrum for frequency f0."""
    s = np.sin(2 * pi * f0 * t)
    Sf = fft(s, NN)
    line.set_ydata(np.abs(Sf))   # `line` assumed created earlier -- TODO confirm
    # line2.set_ydata(sinc(pi * L * f))

slider.on_changed(on_change)
Page 48/255
3.3. SYMMETRIES OF THE FOURIER TRANSFORM. 49
%m a t p l o t l i b i n l i n e
Some definitions
x∗ (n) ⇌ X ∗ (− f ) .
This can be easily checked beginning with the definition of the Fourier transform:
x(−n) ⇌ X(− f ) .
Page 49/255
50 CHAPTER 3. FROM FOURIER SERIES TO FOURIER TRANSFORMS
This last relation can be derived directly from the Fourier transform of x(−n)
∫ +∞
FT {x(−n)} = x(−n) e− j2π f t dt,
−∞
x∗ (−n) ⇌ X ∗ ( f ) .
To sum it all up, we have
x(n) ⇌ X( f )
x(−n) ⇌ X(− f )
x∗ (n) ⇌ X ∗ (− f )
x∗ (−n) ⇌ X ∗( f )
These relations enable us to analyse all the symmetries of the Fourier transform. We begin with the Hermitian
symmetry for real signals:
X( f ) = X ∗ (− f )
from that, we observe that if x(n) is real, then
• the real part of X( f ) is even,
• the imaginary part of X( f ) is odd,
• the modulus of X( f ), |X( f )| is even,
• the phase of X( f ), θ ( f ) is odd.
Moreover, if x(n) is odd or even (x(n) is not necessarily real), we have
The following table summarizes the main symmetry properties of the Fourier transform:
Finally, we have
Real even + imaginary odd ⇌ Real
Real odd + imaginary even ⇌ Imaginary
Page 50/255
3.4. TABLE OF FOURIER TRANSFORM PROPERTIES 51
Page 51/255
52 CHAPTER 3. FROM FOURIER SERIES TO FOURIER TRANSFORMS
Page 52/255
4
Filters and convolution
• Since δ (n − m) = 1 if and only if n = m, then all the terms in the sum cancel, excepted the one with
n = m and therefore we arrive at the identity x(n) = x(n).
• The set of delayed Dirac impulses δ (n − m) form a basis of the space of discrete signals. Then the
n=−∞ x(n)δ (n − m) = x(m). Hence, the
coordinate of a signal on this basis is the scalar product ∑+∞
representation formula just expresses the decomposition of the signal on the basis, where the x(m) are
the coordinates.
This means that x(n), as a waveform, is actually composed of the sum of many Dirac impulses, placed
at each integer, with a weight x(m) which is nothing but the amplitude of the signal at time m = n. The
formula shows how the signal can be seen as the superposition of Dirac impulses with the correct weights.
Lets us illustrate this with a simple Python demonstration:
# Build a test signal x and display its decomposition into weighted,
# delayed Dirac impulses (one subplot per sample).
L = 10
z = np.zeros(L)
x = np.zeros(L)
x[5:9] = range(4)   # samples 5..8 get 0,1,2,3
x[0:4] = range(4)   # samples 0..3 get 0,1,2,3
print("x=", x)
s = np.zeros((L, L))
for k in range(L):
    s[k][k] = x[k]
# this is equivalent to s = np.diag(x)
f, ax = plt.subplots(L + 2, figsize=(7, 7))
for k in range(L):
    ax[k].stem(s[k][:])
    ax[k].set_ylim([0, 3])
    ax[k].get_yaxis().set_ticks([])
    if k != L - 1:
        ax[k].get_xaxis().set_ticks([])
53
54 CHAPTER 4. FILTERS AND CONVOLUTION
# Last two axes: a spacer, then the sum of all elementary signals,
# i.e. the original x (stems drawn in red).
ax[L].axis('off')
ax[L + 1].get_yaxis().set_ticks([])
ax[L + 1].stem(x, linefmt='r')
ax[L + 1].set_title("Sum of all elementary signals")
# f.tight_layout()
f.suptitle("Decomposition of a signal into a sum of Dirac", fontsize=14)
x= [0. 1. 2. 3. 0. 0. 1. 2. 3. 0.]
Page 54/255
4.2. THE CONVOLUTION OPERATION 55
+∞
y(n) = ∑ x(m)h(n − m) = [x ∗ h](n).
m=−∞
This relation is called the convolution of x and h, and this operation is denoted [x ∗ h](n), so as to indicate
that the result of the convolution operation is evaluated at time n and that the variable m is simply a dummy
variable that disappears by the summation.
The convolution operation is important since it enables to compute the output of the system using only
its impulse response. It is not necessary to know the way the system is build, its internal design and
so on. The only thing one must have is its impulse response. Thus we see that the knowledge of the
impulse response enables us to fully characterize the input-output relationships.
4.2.2 Illustration
We show numerically that the output of a system is effectively the weighted sum of delayed impulse
responses. This indicates that the output of the system can be computed either by using its difference
equation, or by the convolution of its input with its impulse response.
Direct response
def op3(signal):
    """First-order recursive filter: y[t] = 0.7*y[t-1] + 0.3*x[t].

    At t=0 the term transformed_signal[t-1] reads the *last* entry of the
    output buffer, which is still 0, so the recursion starts from rest.
    """
    transformed_signal = np.zeros(np.size(signal))
    for t in np.arange(np.size(signal)):
        transformed_signal[t] = 0.7 * transformed_signal[t - 1] + 0.3 * signal[t]
    return transformed_signal
#
# rectangular pulse
N = 20; L = 5; M = 10
r = np.zeros(N)
r[L:M] = 1
#
plt.stem(r)                                      # input pulse
plt.stem(op3(r), linefmt='r-', markerfmt='ro')   # filtered output in red
_ = plt.ylim([0, 1.2])
Page 55/255
56 CHAPTER 4. FILTERS AND CONVOLUTION
u += 1
ax [ u ] . a x i s ( ’ o f f ’ )
#
#f . tight_layout ()
f . suptitle (
" C o n v o l u t i o n a s t h e sum o f a l l d e l a y e d i m p u l s e r e s p o n s e s " , f o n t s i z e = 14 )
4.2.3 Exercises
Exercise 3. 1. Compute by hand the convolution between two rectangular signals,
2. propose a python program that computes the result, given two arrays. Syntax: def
myconv(x,y): return z
3. Of course, convolution functions have already been implemented, in many languages, by many people
and using many algorithms. Implementations also exist in two or more dimensions. So, we do not need to
reinvent the wheel. Consult the help of np.convolve and of sig.convolve (respectively from
numpy and scipy modules).
4. use this convolution to compute and display the convolution between two rectangular signals
Page 56/255
4.2. THE CONVOLUTION OPERATION 57
def myconv(x, y):
    """Exercise skeleton: convolution of two signals of the same length.

    Returns an all-zero array of length 2L-1 until the FILL IN part is
    completed by the student.
    """
    L = np.size(x)
    # we do it in the simple case where both signals have the same length
    assert np.size(x) == np.size(y), "The two signals must have the same lengths"
    # as an exercise, you can generalize this
    z = np.zeros(2 * L - 1)
    #
    ## -> FILL IN
    #
    return z
# test it :
z = myconv ( np . o n e s ( L ) , np . o n e s ( L ) )
p r i n t ( ’ z= ’ , z )
z= [0. 0. 0. 0. 0. 0. 0. 0. 0.]
def myconv(x, y):
    """Convolution of two 1-D signals of the same length L.

    Returns z of length 2L-1 with z[delay] = sum_m x[m] * y[delay-m],
    i.e. the 'full' convolution as computed by np.convolve.

    BUGFIX: the original slices y[-1:-1-delay-1:-1] and y[-delay-1+L:0:-1]
    paired x[m] with the wrong samples of y; they only gave the correct
    result for constant or symmetric y (such as the all-ones test signal).
    """
    L = np.size(x)
    # we do it in the simple case where both signals have the same length
    assert np.size(x) == np.size(y), "The two signals must have the same lengths"
    # as an exercise, you can generalize this
    z = np.zeros(2 * L - 1)
    # delay < L: the supports overlap on x[0..delay]
    for delay in np.arange(0, L):
        z[delay] = np.sum(x[0:delay + 1] * y[delay::-1])
    # delay >= L: the supports overlap on x[delay+1-L..L-1]
    for delay in np.arange(L, 2 * L - 1):
        z[delay] = np.sum(x[delay + 1 - L:L] * y[L - 1:delay - L:-1])
    return z
# test it :
z = myconv ( np . o n e s ( L ) , np . o n e s ( L ) )
p r i n t ( ’ z= ’ , z )
z= [1. 2. 3. 4. 5. 4. 3. 2. 1.]
Text(0.5, 0, ’Delay’)
Page 58/255
Transfer function
5
Given a filter with input x(n) and output y(n), it is always possible to compute the Fourier transform of the
input and of the output, say X( f ) and Y ( f ). The ratio of these two quantities is called the transfer function.
For now, let us denote it by T ( f ). Interestingly, we will see that the transfer function does not depend on x,
and thus is a global characteristic of the system. More than that, we will see that the transfer function is
intimately linked to the impulse response of the system.
m
= X0 e j2π f0 n ∑ h(m)e− j2π f0 m .
m
We recognize above the expression of the Fourier transform of h(m) at the frequency f0 :
H( f0 ) = ∑ h(m)e− j2π f0 m .
m
59
60 CHAPTER 5. TRANSFER FUNCTION
Y ( f ) = X( f )H( f ) .
The time domain description, in terms of convolution product, becomes a simple product in the Fourier
domain.
[x∗h](n) ⇌ X( f )H( f ) .
It is easy to check that reciprocally,
x(n)h(n) ⇌ [X∗H]( f ) .
Try to check it as an exercise. You will need to introduce a convolution for function of a continuous variable,
following the model of the convolution for discrete signals.
Begin with the Fourier transform of x(n)y(n), and replace the signals by their expression as the inverse
Fourier transform:
It remains to note that the sum of exponentials is nothing but the Fourier transform of the complex exponen-
tial e j2π ( f1 + f2 )n , and thus that
∑ e j2π f1 n e j2π f2 n e− j2π f n = δ ( f − f1 − f2 ).
n
Therefore, the double integral above reduces to a simple one, since f2 = f − f1 , and we obtain
∫
FT[x(n)y(n)] = X( f1 )Y ( f − f1 )d f1 = [X ∗Y ]( f ).
(Another proof is possible, beginning with the inverse Fourier transform of the convolution [X ∗ Y ]( f ),
and decomposing the exponential so as to exhibit the inverse Fourier transform of x(n) and y(n)). Try it.
The transform of a convolution into a simple product, and reciprocally, constitutes the Plancherel theo-
rem:
[x ∗ y](t) ⇌ X( f )Y ( f ),
x(t)y(t) ⇌ [X ∗Y ]( f ).
5.2 Consequences
The Fourier transform of x(n)y(n)∗ is
∫
x(n)y(n)∗ ⇌ X(u)Y ( f − u)∗ du,
[1]
Page 60/255
5.2. CONSEQUENCES 61
This relation shows that the scalar product is conserved in the different basis for signals. This property is
called the Plancherel-Parseval theorem. Using this relation with y(n) = x(n), we have
+∞ ∫
∑ |x(n)| 2
= |X( f )|2 d f ,
−∞ [1]
Page 61/255
62 CHAPTER 5. TRANSFER FUNCTION
Page 62/255
Basic representations for digital signals and systems
6
Par J.-F. Bercher – march 5, 2014
In these exercises, we will work with digital signals. Experiments will be done with Python.
We will work with the filtering operation described by the following difference equation
3. Compute and plot the impulse responses for a = −0.8, a = 0.99, and a = 1.01. Conclu-
sions.
63
64 CHAPTER 6. BASIC REPRESENTATIONS FOR DIGITAL SIGNALS AND SYSTEMS
Page 64/255
Filtering
7
1. Create a sine wave x of frequency f 0 = 3, sampled at Fe = 32 on N = 128 points
– using a convolution, y2=lfilter(h,1,x); with h the impulse response of the filter for a = 0.8
Explain why this last operation effectively corresponds to a convolution. Compare the two
results.
3. Plot the transfer function and the Fourier transform of the sine wave. What will be the
result of the product? Measure the gain and phase of the transfer function at the frequency
of the sinusoid ( f0 = 3). Compare these values to the values of gain and phase measured
in the time domain.
4. Do this experiment again, but with a pulse train instead of a sine. This is done simply
in order to illustrate the fact that this time, the output of the filter is deformed. You
may use def rectpulse(x): ""“rectpulse(x): Returns a pulse train with period 2pi”"" return
sign(sin(x))
65
66 CHAPTER 7. FILTERING
Page 66/255
Lab – Basic representations for digital signals and systems
8
Par J.-F. Bercher – le 12 novembre 2013 English translation and update: february 21, 2014 – last update:
2018
%m a t p l o t l i b i n l i n e
# i m p o r t mpld3
# mpld3 . e n a b l e _ n o t e b o o k ( )
In these exercises, we will work with digital signals. Experiments will be done with Python.
We will work with the filtering operation described by the following difference equation
3. Compute and plot the impulse responses for a = −0.8, a = 0.99, and a = 1.01. Conclu-
sions.
from p y l a b i m p o r t ∗
We begin by creating a function that returns a Dirac impulse, and test the result
67
68 CHAPTER 8. LAB – BASIC REPRESENTATIONS FOR DIGITAL SIGNALS AND SYSTEMS
def dirac(n):
    """dirac(n): returns a Dirac impulse of length n (1 at index 0, 0 elsewhere)."""
    d = zeros(n)
    d[0] = 1
    return d
# Representation
N = 100
stem(range(N), dirac(N))
title(r"Dirac $\delta(n)$")
xlabel("n")
ylim([0, 1.2])   # zoom for better visualization
xlim([-5, 10])
(-5, 10)
Filter a data sequence, ‘x‘, using a digital filter. This works for many
fundamental data types (including Object type). The filter is a direct
form II transposed implementation of the standard difference equation
Page 68/255
8.1. STUDY IN THE TIME DOMAIN 69
(see Notes).
Parameters
----------
b : array_like
The numerator coefficient vector in a 1-D sequence.
a : array_like
The denominator coefficient vector in a 1-D sequence. If ‘‘a[0]‘‘
is not 1, then both ‘a‘ and ‘b‘ are normalized by ‘‘a[0]‘‘.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the filter delays. It is a vector
(or array of vectors for an N-dimensional input) of length
‘‘max(len(a), len(b)) - 1‘‘. If ‘zi‘ is None or is not given then
initial rest is assumed. See ‘lfiltic‘ for more information.
Returns
-------
y : array
The output of the digital filter.
zf : array, optional
If ‘zi‘ is None, this is not returned, otherwise, ‘zf‘ holds the
final filter delay values.
See Also
--------
lfiltic : Construct initial conditions for ‘lfilter‘.
lfilter_zi : Compute initial state (steady state of step response) for
‘lfilter‘.
filtfilt : A forward-backward filter, to obtain a filter with linear phase.
savgol_filter : A Savitzky-Golay filter.
sosfilt: Filter data using cascaded second-order sections.
sosfiltfilt: A forward-backward filter using second-order sections.
Notes
-----
The filter function is implemented as a direct II transposed structure.
This means that the filter implements::
where ‘M‘ is the degree of the numerator, ‘N‘ is the degree of the
Page 69/255
70 CHAPTER 8. LAB – BASIC REPRESENTATIONS FOR DIGITAL SIGNALS AND SYSTEMS
-1 -M
b[0] + b[1]z + ... + b[M] z
Y(z) = -------------------------------- X(z)
-1 -N
a[0] + a[1]z + ... + a[N] z
Examples
--------
Generate a noisy signal to be filtered:
Apply the filter to xn. Use lfilter_zi to choose the initial condition of
the filter:
>>> zi = signal.lfilter_zi(b, a)
>>> z, _ = signal.lfilter(b, a, xn, zi=zi*xn[0])
Apply the filter again, to have a result filtered at an order the same as
filtfilt:
Page 70/255
8.1. STUDY IN THE TIME DOMAIN 71
>>> plt.figure
>>> plt.plot(t, xn, ’b’, alpha=0.75)
>>> plt.plot(t, z, ’r--’, t, z2, ’r’, t, y, ’k’)
>>> plt.legend((’noisy signal’, ’lfilter, once’, ’lfilter, twice’,
... ’filtfilt’), loc=’best’)
>>> plt.grid(True)
>>> plt.show()
according to the difference equation y(n) = ay(n − 1) + x(n) corresponds to the command
y=lfilter([1],[1, -a],x), where, of course, x and a have been previously initialized.
⇒ In order to obtain the impulse response, one simply has to excite the system with an impulse!
a = 0.8
N = 100
x = dirac(N)
y = lfilter([1], [1, -a], x)   # impulse response of y(n) = a*y(n-1) + x(n)
stem(y)
title("Impulse response for a={}".format(a))
xlabel("n")
Page 71/255
72 CHAPTER 8. LAB – BASIC REPRESENTATIONS FOR DIGITAL SIGNALS AND SYSTEMS
First values
y[:6]= [1. 0.8 0.64 0.512 0.4096 0.32768]
to compare with a**n :
[1. 0.8 0.64 0.512 0.4096 0.32768]
We note that the experimental impulse response corresponds to the theoretical one, which is h(n) =
an .
We will check this for some other values of a.
To ease our explorations, we will first define a function that returns the impulse response, for two vectors
[b] and [a] describing any rational filter. It suffices to compute the filter’s output, with a Dirac at its input,
on a specified length:
def ri(b, a, n):
    """Returns an impulse response of length n (int)
    of a filter with coefficients a and b.
    """
    return lfilter(array(b), array(a), dirac(n))
Text(0.5, 0, ’n’)
Conclusions:
Page 72/255
8.2. DISPLAY OF RESULTS 73
Page 73/255
74 CHAPTER 8. LAB – BASIC REPRESENTATIONS FOR DIGITAL SIGNALS AND SYSTEMS
# We w i l l n e e d t h e f f t f u n c t i o n s
from numpy . f f t i m p o r t f f t , i f f t
Page 74/255
8.4. FILTERING 75
grid(b=True)
xlim([-Fe / 2, Fe / 2])
subplot(2, 1, 2)
plot(f, angle(H), label=u"Frequency Response")
xlabel("Frequencies")
title("Frequency Response (phase)")
grid(b=True)
xlim([-Fe / 2, Fe / 2])
fig.tight_layout()  # avoid overlapping of titles and labels
# Value at f=x: select it with the boolean mask f == x
# (pylab's find() is deprecated/removed; use boolean indexing instead)
print("Value at f=0 : ".rjust(20), H[f == 0].real)
print("Value at f=Fe/2 : ", H[f == -Fe / 2].real)
print("To compare with theoretical values")
/home/bercherj/.local/lib/python3.5/site-packages/ipykernel_launcher.py:2: Matplotl
/home/bercherj/.local/lib/python3.5/site-packages/ipykernel_launcher.py:3: Matplotl
This is separate from the ipykernel package so we can avoid doing imports until
8.4 Filtering
1. Create a sine wave x of frequency f 0 = 3, sampled at Fe = 32 on N = 128 points
Page 75/255
76 CHAPTER 8. LAB – BASIC REPRESENTATIONS FOR DIGITAL SIGNALS AND SYSTEMS
– using a convolution, y2=lfilter(h,1,x); with h the impulse response of the filter for a = 0.8
Explain why this last operation effectively corresponds to a convolution. Compare the two
results.
# Creation of the simple sine wave
N, fo, Fe = 128, 3, 32
t = arange(N) / Fe
x = sin(2 * pi * fo * t)
figure(3)
plot(t, x)
xlabel("Time")
grid(b=True)
ylim([-1.2, 1.2])
(-1.2, 1.2)
# Filtering with filter h
a = 0.8
h = ri([1], [1, -a], N)          # h computed again, but on N points
y1 = lfilter([1], [1, -0.8], x)  # via the difference equation
y2 = lfilter(h, [1], x)          # via convolution with the impulse response
figure()
plot(t, y1, label='y1')
plot(t, y2, label='y2')
grid(b=True)
legend()
show()
Page 76/255
8.4. FILTERING 77
/usr/local/lib/python3.5/site-packages/scipy/signal/signaltools.py:1344: FutureWarn
out = out_full[ind]
One can also plot the difference between the two signals, so things are clear!
figure ()
p l o t ( t , y1−y2 , l a b e l = ’ y1−y2 ’ )
x l a b e l ( " Time " )
g r i d ( b= T r u e )
legend ( )
We are now going to check Plancherel’s theorem which says that the Fourier transform of a convolu-
tion product is the product of the Fourier transforms. We will simply observe that the output of a system,
computed as the inverse Fourier transform of the product of the transfer function H with the Fourier trans-
form X of the input signal is identical (or at least extremely similar) to the output computed by convolution
or as solution of the difference equation.
# Output computed in the frequency domain: y3 = IFFT(FFT(h) * FFT(x)).
# real() discards the tiny imaginary residue left by the FFT round-trip.
y3 = real(ifft(fft(h) * fft(x)))
plot(t, y3, label='y3')
plot(t, y2, label='y2')
legend()
The difference observed at the beginning of the two plots comes from a different assumption on the
values of the signals at negative (non observed) times. Actually, the function lfilter assumes that the signal is
zero where non observed, which implies a transient response at the output of the filter. The Fourier transform
is computed with the algorithm of fft, which assumes that all signals are periodic, thus periodized outside
the observation interval. We will discuss this in more details later.
Page 77/255
78 CHAPTER 8. LAB – BASIC REPRESENTATIONS FOR DIGITAL SIGNALS AND SYSTEMS
Page 78/255
8.4. FILTERING 79
of the sinusoid ( f0 = 3). Compare these values to the values of gain and phase measured
in the time domain.
X = fftshift(fft(x))
H = fftshift(fft(h))
M = len(x)
f = arange(M) / M * Fe - Fe / 2   # frequency axis centered on 0
plot(f, abs(H), color='green', label="H")
stem(f, abs(X) * 6 / M, markerfmt='b^', label="X")
xlim([-16, 16])
xlabel("Frequency")
legend()
The sine wave has frequency f o = 3. let us measure the values of gain and phase at this frequency:
# pylab's find() is deprecated/removed: select the bin with a boolean mask
H3 = H[f == 3]
print("Value of the complex gain : ", H3)
print("Modulus : ", abs(H3))
print("Phase (degrees) : ", angle(H3) / pi * 180)
/home/bercherj/.local/lib/python3.5/site-packages/ipykernel_launcher.py:1: Matplotl
"""Entry point for launching an IPython kernel.
Page 79/255
80 CHAPTER 8. LAB – BASIC REPRESENTATIONS FOR DIGITAL SIGNALS AND SYSTEMS
figure ()
p l o t ( t , x , t , y3 )
g r i d ( ’ on ’ )
/usr/local/lib/python3.5/site-packages/matplotlib/cbook/__init__.py:424: Matplotlib
Passing one of ’on’, ’true’, ’off’, ’false’ as a boolean is deprecated; use an actu
warn_deprecated("2.2", "Passing one of ’on’, ’true’, ’off’, ’false’ as a "
Measure of phase: we first measure the delay between the two signals
figure()
plot(t, x, label="x")
plot(t, y3, label="y3")
legend()
grid(b=True)   # boolean instead of the deprecated string 'on'
xlim([0, 0.4])
/usr/local/lib/python3.5/site-packages/matplotlib/cbook/__init__.py:424: Matplotlib
Passing one of ’on’, ’true’, ’off’, ’false’ as a boolean is deprecated; use an actu
warn_deprecated("2.2", "Passing one of ’on’, ’true’, ’off’, ’false’ as a "
(0, 0.4)
# x begins at 0, so the delay is given by the first index where y3 > 0
# (pylab's find() is deprecated/removed: flatnonzero returns the same indices)
deltaT = flatnonzero(y3 > 0)[0] / Fe
print("The value of the phase difference, in degrees, is",
      (2 * pi * fo) * deltaT / pi * 180, "°")
Page 80/255
8.4. FILTERING 81
/home/bercherj/.local/lib/python3.5/site-packages/ipykernel_launcher.py:1: Matplotl
"""Entry point for launching an IPython kernel.
Observations : We see that if the input is a sine wave, then the output is also a sine wave, up to a
gain and phase shift. These gain and phase correspond exactly to the gain and phase given by the transfer
function.
Page 81/255
82 CHAPTER 8. LAB – BASIC REPRESENTATIONS FOR DIGITAL SIGNALS AND SYSTEMS
Page 82/255
9
The continuous time case
• Section 9
– Section 9.1
∗ Section 12.3.1
∗ Section ??
∗ Section 9.1.3
∗ Section ??
– Section 9.2
83
84 CHAPTER 9. THE CONTINUOUS TIME CASE
that is
[ ] T2
e− j2π f t 1 [ jπ f T ]
X( f ) = A =A e − e− j π f T
− j2π f − T2 j2π f
so that finally
sin(π f T ) △
X( f ) = AT = AT sinc (π f T ). (9.1)
π fT
where sinc (.) is called a cardinal sinus. We note that this Fourier transform is real and even. We
will see later that this property is true for the Fourier transforms of all real and even signals. The function
sinc (π f T ) vanishes for π f T = kπ , that is for f = k/T ; except for k = 0, since sinc (x) = 1 for x → 0.
Let us look at this sinc function (you may play with several values of the width):
%m a t p l o t l i b i n l i n e
def sinc(x):
    """Cardinal sine sin(x)/x, with the continuity value 1 at x = 0.

    Accepts a scalar or an array-like; always returns an ndarray.
    """
    if isinstance(x, (int, float)):
        x = [x]                     # promote scalars to a 1-element sequence
    x = np.array(x)
    out = np.ones(np.shape(x))      # 1 everywhere, in particular at x == 0
    I = np.where(x != 0)
    out[I] = np.sin(x[I]) / x[I]
    return out
N = 1000
f = np.linspace(-0.5, 0.5, 400)
plt.plot(f, sinc(pi * 6 * f))   # cardinal sine for a width-6 pulse
plt.grid(b=True)
#----- Widget callbacks -------------
@out.capture(clear_output=True, wait=True)
def pltsinc(value):
    """Slider callback: redraw sinc(pi*T*f) for the current width T.

    The `value` argument from `observe` is ignored; T is read from
    the slider widget `s`.
    """
    # clear_output(wait=True)
    T = s.value
    plt.plot(f, sinc(pi * T * f))
    plt.grid(b=True)
    plt.show()
s = widgets.FloatSlider(min=0, max=20, step=0.1, value=8)
pltsinc('Width')                 # initial drawing
s.observe(pltsinc, 'value')
display(widgets.VBox([s, out]))
# alternatively
# interact(pltsinc, value=fixed(1), T=[0.1, 10, 0.1])
Page 85/255
86 CHAPTER 9. THE CONTINUOUS TIME CASE
1 ⇌ δ(f)
• Second, consider a rectangular pulse with amplitude 1/T and width T . When T → 0, this pulse tends
to a Dirac distribution, a mass at zero, with infinite amplitude but also with a unit integral. By the
Fourier transform of a rectangular pulse (9.1), we obtain that the Fourier transform of a Dirac function
is a unit constant
δ (t) ⇌ 1
%m a t p l o t l i b t k
from m a t p l o t l i b . w i d g e t s i m p o r t S l i d e r
f i g , ax = p l t . s u b p l o t s ( )
f i g . s u b p l o t s _ a d j u s t ( bottom =0.2 , l e f t = 0 . 1 )
s l i d e r . on_changed ( on_change )
Page 86/255
9.1. THE CONTINUOUS TIME FOURIER TRANSFORM 87
/usr/local/lib/python3.5/site-packages/matplotlib/cbook/__init__.py:424: Matplotlib
Passing one of ’on’, ’true’, ’off’, ’false’ as a boolean is deprecated; use an actu
warn_deprecated("2.2", "Passing one of ’on’, ’true’, ’off’, ’false’ as a "
Name x(t) X( f )
1 Linearity ∑i ai xi (t) ∑i ai Xi ( f )
2 Duality x(− f ) X(t)( )
3. Time and frequency scaling x(α t) 1
|α | S α
f
Page 87/255
88 CHAPTER 9. THE CONTINUOUS TIME CASE
Property 1. This property enables to express the Fourier transform of a delayed signal as a function of the
Fourier transform of the initial signal and a delay term:
Proof. This property can be obtained almost immediately from the definition of the Fourier transform:
∫ +∞
FT {x(t − t0 )} = x(t − t0 ) e− j2π f t dt;
−∞
that is ∫ +∞
− j2π f t0
FT {x(t − t0 )} = e x(t − t0 )e− j2π f (t−t0 ) dt = e− j2π f t0 X( f ).
−∞
Page 88/255
9.2. DIRAC IMPULSE, REPRESENTATION FORMULA AND CONVOLUTION 89
x(t)δ (t − t0 ) = x(t0 )δ (t − t0 ).
Consequently, ∫ +∞ ∫ +∞
x(t)δ (t − t0 )dt = x(t0 ) δ (t − t0 )dt = x(t0 ).
−∞ −∞
Therefore, we always have
{ ∫ +∞
x(t) = −∞ x(τ )δ (t − τ )dτ
∫ +∞
with x(τ ) = −∞ x(t)δ (t − τ )dt.
This is nothing but the continuous-time version of the representation formula.
The set of distributions {δτ (t) : δ (t − τ )}, forms an orthonormal basis and x(τ ) can be viewed as a
coordinate of x(t) on this basis. Indeed, the scalar product between x(t) and δτ (t) is nothing but
∫ +∞
x(τ ) =< x(t), δτ (t) >= x(t)δ (t − τ )dt,
−∞
and x(t) is then given as the sum of the basis functions, weighted by the associated coordinates:
∫ +∞
x(t) = x(τ )δ (t − τ )dτ .
−∞
Following the same approach as in the discrete case, we define the impulse response h(t) as the output
of a linear invariant system to a Dirac impulse. By linearity, the output of the system to any input x(t),
expressed using the representation formula, is
∫ +∞
y(t) = x(τ )h(t − τ )dτ = [x ∗ h](t).
−∞
This is the time-continuous convolution between x and h, denoted [x ∗ h](t). It enables to express the
output of the filter using only the input and the impulse response. This shows the importance of the impulse
response as a description of the system. The other notions we studied in the discrete case, namely transfer
function, Plancherel and Parseval theorems, etc, extend straightforwardly to the continuous case.
Page 89/255
90 CHAPTER 9. THE CONTINUOUS TIME CASE
Page 90/255
Periodization, discretization and sampling
10
10.1 Periodization-discretization duality
10.1.1 Relation between Fourier series and Fourier transform
Remember that we defined the Fourier transform as the limit of the Fourier series of periodic signal, when
the period tends to infinity. A periodic signal can also be viewed as the repetition of a basic pattern. This
enables us to give a link between Fourier series and transform. Let x(n) be a periodic function with period L0 .
Then
+∞
x(n) = ∑ xL0 (n − mL0 ), (10.1)
m=−∞
where xL0 (n) is the basic pattern with length L0 . x(n) being periodic, it can be expressed using a Fourier
series, as
L0 −1
x(n) = ∑ ck e j2π k f n ,
0
n=0
where f0 = 1/L0 and
1
ck = ∑ xL0 (n)e− j2π k f0 n .
L0 [L ]0
that is
1 L0 −1
X( f ) = FT {x(n)} = ∑ XL0 (k f0 )δ ( f − k f0 ) .
L0 k=0
(10.4)
91
92 CHAPTER 10. PERIODIZATION, DISCRETIZATION AND SAMPLING
+∞
1 L0 −1 j2π k f0 n
∑ δ (n − mL0 ) = ∑e
L0 k=0
. (10.5)
m=−∞
The series of delayed Dirac impulses is called a Dirac comb. It is often denoted
+∞
wL0 (n) = ∑ δ (n − mL0 ). (10.6)
m=−∞
+∞
1 L0 −1
∑ e j2π f mL0 n = ∑ δ ( f − k f0 ) ;
L0 k=0
(10.7)
m=−∞
+∞
1 L0 −1
∑ δ (n − mL0 ) ⇌ ∑ δ ( f − k f0 ) .
L0 k=0
(10.8)
m=−∞
This last relation shows that the Fourier transform of a Dirac comb is also a Dirac comb, these two combs
having an inverse spacing.
Exercise 4. Let us check this numerically. This is very easy: define a Dirac comb, take its Fourier transform
using the fft function, and look at the result.
# # DO IT YOURSELF . . .
# DiracComb=
# DiracComb_f = f f t ( DiracComb )
# etc
# A Dirac comb and its FFT: the transform is also a Dirac comb with
# inverse spacing (second Poisson formula).
N = 200
L0 = 5
DiracComb = np.zeros(N)
DiracComb[::L0] = 1      # one impulse every L0 samples
DiracComb_f = fft(DiracComb)
plt.stem(DiracComb)
plt.ylim([0, 1.1])
plt.xlabel("Time")
plt.figure()
f = np.linspace(0, 1, N)
plt.stem(f, 1 / N * abs(DiracComb_f))  # actually there is a factor N in the fft
_ = plt.ylim([0, 1.1 * 1 / L0])
plt.xlabel("Frequency")
Page 92/255
10.1. PERIODIZATION-DISCRETIZATION DUALITY 93
Page 93/255
94 CHAPTER 10. PERIODIZATION, DISCRETIZATION AND SAMPLING
Text(0.5, 0, ’Frequency’)
We may now go back to the exploration of the links between Fourier series and transform, using the
second Poisson formula (10.8).
Convolution with a delayed Dirac impulse - Let us first look at the result of the convolution of any
function with a delayed Dirac impulse: let us denote δn0 (n) = δ (n − n0 ). The convolution [x ∗ δn0 ](n) is eq
given by
This is nothing but the expression of a periodic signal. If L0 is larger than the support L of xL (n), then x(n)
is simply the repetition, with a period L0 , of the pattern xL (n).
Convolution with a Dirac comb periodizes the signal.
Exercise 5. Let us check this with some simple Python commands: create a Dirac comb, a test signal (e.g.)
a rectangular pulse, convolve the two signals and plot the result. Experiment with the value L of the length
of the test signal.
# DO IT YOURSELF!
# DiracComb=
# pulse=
#...
# z=np . c o n v o l v e ( DiracComb , p u l s e )
# p l t . stem ( . . . )
# Build a Dirac comb (period L0) and a rectangular pulse of length L.
N = 400; L0 = 20; L = 6  # L is the length of the pulse
DiracComb = np.zeros(N)
DiracComb[::L0] = 1
pulse = np.zeros(40); pulse[0:L] = 1  # or range(L)  # <<--
Page 94/255
10.1. PERIODIZATION-DISCRETIZATION DUALITY 95
# Convolving the pulse with the comb periodizes it; show the first samples.
z = np.convolve(DiracComb, pulse)
plt.stem(z[0:100])
plt.title('Convolution with a Dirac comb')
plt.xlabel('Time')
Text(0.5, 0, ’Time’)
We see that the convolution with the Dirac comb effectively periodizes the initial pattern. In the case
where the support L of the pulse is larger than the period L0 of the comb, the result presents aliasing
between consecutive patterns (but the resulting signal is still periodic).
Effect in the frequency domain - In the frequency domain, we know, by the Plancherel theorem, that the
product of signals results in the convolution of their Fourier transforms (and vice versa). As a consequence,
Since the Fourier transform of a Dirac comb is also a Dirac comb, we obtain that
$$x(n) = [x_L * w_{L_0}](n) \rightleftharpoons X_L(f)\cdot\frac{1}{L_0}\, w_{\frac{1}{L_0}}(f),$$
or
$$X(f) = X_L(f)\cdot\frac{1}{L_0}\, w_{\frac{1}{L_0}}(f) = \frac{1}{L_0} \sum_k X_L(k f_0)\, \delta(f - k f_0),$$
with f0 = 1/L0 . We see that the Fourier transform of the periodized signal is the product of the Fourier
transform of the initial pattern with a Dirac comb in frequency. Hence, periodization in the time domain
results in a discretization of the frequency axis, yielding a Fourier transform constituted of spectral lines.
Observe that the amplitudes of the spectral lines coincide with the Fourier series coefficients. hence it is
immediate to find the Fourier series coefficients from the Fourier transform of the periodized pattern.
Page 95/255
96 CHAPTER 10. PERIODIZATION, DISCRETIZATION AND SAMPLING
Exercise 6. Continue the exercise 5 by an analysis of what happens in the Fourier domain: compute the
Fourier transforms of the original and periodized signals and compare them on the same plot. The Fourier
transform of the periodized signal should be computed without zero padding, ie exactly on N points.
You will have to introduce a factor to account for the fact that there is more signal in the periodized one
than in the initial one - the factor to consider is simply the number of periods.
# Exercise scaffold: compare the FT of the original pulse (zero-padded on MM
# points) with the FT of the periodized signal (computed on exactly N points).
#
N = 200
MM = 2000  # for zero padding
plt.figure()
f = np.linspace(0, 1, MM)
fn = np.linspace(0, 1, N)
#
# FILL IN HERE
#
plt.title('Fourier transform of original and periodized pulses')
_ = xlabel('Frequency')
%m a t p l o t l i b i n l i n e
# Periodize a rectangular pulse by convolution with a Dirac comb.
N = 200
L0 = 20
L = 12  # L is the length of the pulse
DiracComb = np.zeros(N)
DiracComb[::L0] = 1
pulse = np.zeros(40)
pulse[0:L] = 1  # exp(-0.3*arange(L))
z = np.convolve(DiracComb, pulse)
# Display the periodized signal.
plt.stem(z[0:200])
plt.title('Periodized signal')
plt.xlabel('Time')
Page 96/255
10.2. THE DISCRETE FOURIER TRANSFORM 97
# Compare the FT of the original pulse (zero-padded) with the FT of the
# periodized signal (exactly N points -> spectral lines).
#
MM = 1000
plt.figure()
f = np.linspace(0, 1, MM)
fn = np.linspace(0, 1, N)
# Factor 10 = number of periods in the periodized signal (N/L0).
plt.plot(f, 10 * abs(fft(pulse, MM)), label="FT original signal")
plt.stem(fn, abs(fft(z, N)), '-or', label="FT periodized signal")
plt.legend()
plt.title('Fourier transform of original and periodized pulses')
_ = xlabel('Frequency')
k=0
(10.17)
with
$$X(k) = \frac{1}{N} \sum_{n=0}^{N-1} x(n)\, e^{-j2\pi \frac{kn}{N}}.$$
In this section, we show that the DFT can also be viewed as a sampled version of the discrete-time Fourier
transform or as a simple change of basis for signal representation. We indicate that the assumption of
periodized signal in the time-domain implies some caution when studying some properties of the DFT,
namely time shifts or convolution.
Page 97/255
98 CHAPTER 10. PERIODIZATION, DISCRETIZATION AND SAMPLING
10.2.1 The Discrete Fourier Transform: Sampling the discrete-time Fourier transform
Given what we learned before, it is very easy to see that the DFT is indeed a sampled version of the discrete-
time Fourier transform. We know that periodizing a signal can be interpreted as a convolution of a pattern
with a Dirac comb. In turn, this implies in the frequency domain a multiplication of the Fourier transform
of the initial pattern with a Dirac comb: if we denote x0 (n) the signal for n ∈ [0, N),
and
$$X(f) = X_0(f)\cdot\frac{1}{N}\, w_{\frac{1}{N}}(f) \qquad (10.18)$$
$$\phantom{X(f)} = X_0(f)\cdot\frac{1}{N}\sum_{k=0}^{N-1}\delta\!\left(f-\frac{k}{N}\right) \qquad (10.19)$$
$$\phantom{X(f)} = \frac{1}{N}\sum_{k=0}^{N-1} X_0\!\left(\frac{k}{N}\right)\delta\!\left(f-\frac{k}{N}\right) \qquad (10.20)$$
Page 98/255
10.2. THE DISCRETE FOURIER TRANSFORM 99
since the integration with the Dirac distribution yields the value of the function for the argument where the
Dirac is nonzero. It simply remains to note that
$$X_0\!\left(f=\frac{k}{N}\right) = \sum_{n=-\infty}^{+\infty} x_0(n)\, e^{-j2\pi\frac{kn}{N}} \qquad (10.25)$$
$$\phantom{X_0\!\left(f=\frac{k}{N}\right)} = \sum_{n=0}^{N-1} x_0(n)\, e^{-j2\pi\frac{kn}{N}} \qquad (10.26)$$
since $x(n) = x_0(n)$ on the interval $[0, N)$. Denoting $X(k) = X_0\!\left(f=\frac{k}{N}\right)$, we arrive at the formulas (10.17) for
the DFT.
We illustrate this numerically. We look at the Fourier transform of a sine wave, with and without zero-
padding. In the first case, we obtain something that represents the discrete-time Fourier transform, and which
exhibits the consequence of the time-limitation of the signal. In the second case, we obtain the samples of
the DFT.
##
# experiments on DFT: the DFT as sampled FT
N = 50  # Fourier resolution: 1/N
fo = 0.07  # not on the Fourier grid
t = arange(N)
s = sin(2 * pi * fo * t)
# Zero-padded FFT approximates the discrete-time Fourier transform.
Sz = fft(s, 1000)
f = arange(1000) / 1000
# Superimpose the zero-padded FT (continuous-looking) and the N-point DFT
# samples, with annotated arrows.
plot(f, abs(Sz), lw=2, color="blue")
S = fft(s)
f2 = arange(N) / N
stem(f2, abs(S), lw=2, linefmt='g-', markerfmt='go')
plot(f2, abs(S), 'r-')
xlabel("Frequencies")
# Here we play with annotations and arrows...
annotate(
    "True Fourier transform\n(zero-padded data)",
    xy=(0.075, 21),
    xytext=(0.11, 23),
    arrowprops=dict(
        arrowstyle="->",
        color="blue",
        connectionstyle="arc3,rad=0.2",
        shrinkA=5,
        shrinkB=10))
annotate(
    "Samples on the DFT\ngrid",
    xy=(0.08, 15),
    xytext=(0.13, 17),
    arrowprops=dict(
        arrowstyle="->",
        color="green",
        connectionstyle="arc3,rad=0.2",
        shrinkA=5,
        shrinkB=10))
annotate (
" A p p a r e n t FT . . " ,
xy = ( 0 . 0 9 , 1 0 ) ,
Page 99/255
100 CHAPTER 10. PERIODIZATION, DISCRETIZATION AND SAMPLING
---------------------------------------------------------------------------
<ipython-input-2-b4669398e50a> in <module>()
10 S = fft(s)
11 f2 = arange(N) / N
---> 12 stem(f2, abs(S), lw=2, linefmt=’g-’, markerfmt=’go’)
13 plot(f2, abs(S), ’r-’)
14 xlabel("Frequencies")
Thus we note that without caution and analysis, it is easy to be mistaken. Zero-padding – i.e. computing
the FT of the data padded with zeros – often enables one to avoid misinterpretations.
Page 100/255
10.2. THE DISCRETE FOURIER TRANSFORM 101
$$e_k = \frac{1}{\sqrt{N}} \left[1,\; e^{-j2\pi \frac{k}{N}},\; \ldots,\; e^{-j2\pi \frac{kl}{N}},\; \ldots,\; e^{-j2\pi \frac{k(N-1)}{N}}\right]^T$$
also form a basis of the same space. It is a simple exercise to check that $e_k^+ e_l = \delta(k - l)$. Thus it is
possible to express x in the basis of complex exponentials. The coordinate X(k) of x on the vector $e_k$ is
given by the scalar product $e_k^+ x$, where $^+$ denotes transposition and complex conjugation. If we denote
[ ]
F = e0 , e1 , . . . eN−1
the Fourier matrix, then we can note that F+ F = 1, which means that F is a unitary matrix – and that
in particular F−1 = F+ . Then, the change of basis to the basis of exponentials can be expressed as
X = F+ x (10.28)
and x can be expressed in terms of the X(k) as
x = FX. (10.29)
Developing line k of (10.28), we obtain
$$X(k) = \frac{1}{\sqrt{N}} \sum_{n=0}^{N-1} x(n)\, e^{-j2\pi \frac{kn}{N}},$$
and developing line n of (10.29), we obtain
$$x(n) = \frac{1}{\sqrt{N}} \sum_{k=0}^{N-1} X(k)\, e^{j2\pi \frac{kn}{N}}.$$
Up to a simple factor (let e.g. $X'(k) = \frac{1}{\sqrt{N}} X(k)$), we recover the formulas (10.17) of the DFT.
plt.rcParams['figure.figsize'] = (8, 6)  # default figure size for what follows
Page 101/255
102 CHAPTER 10. PERIODIZATION, DISCRETIZATION AND SAMPLING
$$X_s(f) = \sum_n x_s(n)\, e^{-j2\pi f n}.$$
Taking into account that only the samples at indexes n = kN0 are nonzeros, we may denote xd (k) = xs (kN0 )
(d for ‘downsampled’), and make the change of variable n = kN0
The Fourier transform of the downsampled signal is simply a scaled version of the Fourier transform of the
sampled signal. Hence, they contain the very same information. In order to understand what happens in the
sampling/downsampling operation, we thus have to focus on the sampling operation, that is step 1. above.
The sampled signal is
xs (n) = x(n).wN0 (n).
By Plancherel’s theorem, we have
As in the discrete case, the continuous convolution with a Dirac comb results in the periodization of the
initial pattern, that is
$$X_s(f) = \frac{1}{N_0} \sum_k X\!\left(f - \frac{k}{N_0}\right).$$
Page 102/255
10.4. ILLUSTRATION 1 103
10.4 Illustration 1
Let us illustrate this on our test signal:
# Load the test signal and display it in time and frequency domains.
loadsig = np.load("signal.npz")  # load the signal
x = loadsig["x"]
N = len(x)
#
M = 8 * N  # Used for fft computations
# Definition of vectors t and f
t = np.arange(N)
f = np.linspace(-0.5, 0.5, M)
# Plot time signal
plot(t, x)
title('Time Signal')
plt.grid(True)
plt.figure()
# plot frequency signal
xf = fftshift(fft(x, M))
plot(f, abs(xf))
title('Frequency Signal')
xlabel('Frequencies')
plt.grid(True)
We first define a subsampler function, that takes for argument the signal x and the subsampling factor k.
def subsampb(x, k, M=None):
    """Subsampling with a factor k.

    Keeps one sample out of k (zeros elsewhere, so the length is preserved).
    Returns the subsampled signal and its Fourier transform on M points.

    M defaults to len(x), evaluated at call time. (The original default
    ``M=len(x)`` was evaluated once at definition time against a global x —
    a classic Python pitfall; the sentinel form fixes that while keeping
    explicit calls ``subsampb(x, k, M)`` unchanged.)
    """
    if M is None:
        M = len(x)
    xs = np.zeros(np.shape(x))
    xs[::k] = x[::k]
    xsf = fftshift(fft(xs, M))
    return (xs, xsf)
10.5 Illustration 2
%m a t p l o t l i b i n l i n e
@out.capture(clear_output=True, wait=True)
def sampling_experiment(val):
    """Widget callback: show the effect of subsampling (factor from slide_k)
    in the time domain (top) and the frequency domain (bottom)."""
    k = slide_k.value
    fig, bx = plt.subplots(2, 1, figsize=(8, 6))
    # clear_output(wait=True)
    bx[0].plot(t, x, label='Original Signal')
    (xs, xsf) = subsampb(x, k, M)
    bx[0].stem(t, xs, linefmt='g-', markerfmt='bo', basefmt='b-',
               label='Subsampled Signal')
    bx[0].set_xlabel('Time')
    bx[0].legend()
    #
    bx[1].plot(f, abs(xf), label='Original Signal')
    # xef = subsampb(x, k)[1]
    bx[1].plot(f, k * abs(xsf), label='Fourier transform of subsampled signal')
    # The factor k above takes into account the power lost by subsampling
    # NOTE(review): bare xlabel targets the current axes (pylab style);
    # presumably bx[1].set_xlabel was intended — confirm.
    xlabel('Frequency')
    bx[1].legend(loc=(0.6, 0.85))
    fig.suptitle("Effect of sampling on time and frequency domains", fontsize=14)
    # tight_layout()
    plt.show()

display(widgets.VBox([slide_k, out]))
sampling_experiment('')
slide_k.observe(sampling_experiment, 'value')
%m a t p l o t l i b t k
from m a t p l o t l i b . w i d g e t s i m p o r t S l i d e r
f i g , ax = p l t . s u b p l o t s ( 2 , 1 )
f i g . s u b p l o t s _ a d j u s t ( bottom =0.2 , l e f t = 0 . 1 )
Page 105/255
106 CHAPTER 10. PERIODIZATION, DISCRETIZATION AND SAMPLING
l i n e x f , = ax [ 1 ] . p l o t ( f , a b s ( x f ) , lw = 2)
l i n e x f _ u p d a t e , = ax [ 1 ] . p l o t (
f , k ∗ abs ( xsf ) , l a b e l = ’ F o u r i e r t r a n s f o r m of subsampled s i g n a l ’ )
# m a r k e r s x _ u p d a t e , s t e m s x _ u p d a t e , _= ax [ 0 ] . s t e m ( t , xs , l i n e f m t = ’ g − ’ , m a r k e r f m t = ’ bo
’ , b a s e f m t = ’ b − ’ , l a b e l = ’ Su b s a m p l e d S i g n a l ’ )
linex_update , = plt_stem (
t , x , ’−o r ’ , ax = ax [ 0 ] ) # ax [ 0 ] . p l o t ( t , xs , ’ ob ’ , l a b e l = ’ S u b s a m p l e d S i g n a l
’)
l i n e x , = ax [ 0 ] . p l o t ( t , x , l a b e l = ’ O r i g i n a l S i g n a l ’ )
ax [ 0 ] . s e t _ x l a b e l ( ’ Time ’ )
ax [ 0 ] . l e g e n d ( )
# line2 , = ax . p l o t ( f , s i n c ( p i ∗L∗ f ) , lw =2 )
# line2 i s i n o r d e r t o compare w i t h t h e " t r u e " s i n c
ax [ 0 ] . g r i d ( b= T r u e )
ax [ 1 ] . g r i d ( b= T r u e )
def on_change(k):
    """Slider callback: recompute the subsampled signal and refresh the plots."""
    k = int(round(k))  # slider gives a float; subsampling factor must be int
    (xs, xsf) = subsampb(x, k, M)
    linexf_update.set_ydata(k * abs(xsf))
    # Interleave with zeros so the stem markers line up with the time axis.
    xxs = zeros(3 * len(xs))
    xxs[1:-1:3] = xs
    linex_update.set_ydata(xxs)
s l i d e r . on_changed ( on_change )
Page 106/255
10.6. THE SAMPLING THEOREM 107
X( f ) = 0 for | f | > B
with f ∈ [− 12 , 12 ] for discrete time signals. Then, after sampling at rate fs , the Fourier transform is the
periodic summation of the original spectrum.
$$X_s(f) = f_s \sum_k X(f - k f_s). \qquad (10.33)$$
%m a t p l o t l i b i n l i n e
# Show the periodized spectrum with ticks at -1/2, -fs, -B, 0, B, fs, 1/2.
plt.figure(figsize=(7, 2))
plt.plot(f, k * abs(xsf), label='Fourier transform of subsampled signal')
plt.xlim([-0.5, 0.5])
_ = plt.xticks([-1 / 2, -1 / 3, -0.16, 0, 0.16, 1 / 3, 1 / 2],
               ['$-\\frac{1}{2}$', '-$f_s$', '-$B$', '$0$', '$B$', '$f_s$',
                '$\\frac{1}{2}$'],
               fontsize=14)
Hence, provided that there is no aliasing between consecutive images, it will be possible to retrieve the
initial Fourier transform from this periodized signal. This is a fundamental result, which is known as the
Shannon-Nyquist theorem, or sampling theorem.
In the frequency domain, this simply amounts to introduce a filter H( f ) that only keeps the frequencies
in [− fs /2, fs /2]: {
H( f ) = 1 for | f | < fs /2
H( f ) = 0 for | f | > fs /2
Xs ( f ).H( f ) = fs X( f )
Page 107/255
108 CHAPTER 10. PERIODIZATION, DISCRETIZATION AND SAMPLING
and we are able to recover X( f ) up to a simple factor. Of course, since we recover our signal in the
frequency domain, we can also get it in the time domain by inverse Fourier transform. By Plancherel’s
theorem, it immediately comes
x(n) = Ts [xs ∗ h](n),
with Ts = 1/ fs . A simple computation gives us the expression of the impulse response h as the inverse
Fourier transform of a rectangular pulse of width fs :
$$h(n) = \int \mathrm{rect}_{f_s}(f)\, e^{j2\pi f n}\, df \qquad (10.34)$$
$$\phantom{h(n)} = \int_{-f_s/2}^{f_s/2} e^{j2\pi f n}\, df \qquad (10.35)$$
$$\phantom{h(n)} = f_s\, \frac{\sin(\pi f_s n)}{\pi f_s n} \qquad (10.36)$$
$$x(n) = \sum_{k=-\infty}^{+\infty} x(kT_s)\, \frac{\sin(\pi f_s (n - kT_s))}{\pi f_s (n - kT_s)}.$$
This formula shows that it is possible to perfectly reconstruct a bandlimited signal from its samples,
provided that the sampling rate fs is more than twice the maximum frequency B of the signal. Half the
sampling frequency, fs/2, is called the Nyquist frequency, while the minimum sampling frequency, 2B, is
called the Nyquist rate.
The Shannon-Nyquist theorem can then be stated as follows:
fs > 2B.
It is then possible to perfectly reconstruct the original signal from its samples, through the Shannon-Nyquist
interpolation formula
$$x(n) = \sum_{k=-\infty}^{+\infty} x(kT_s)\, \frac{\sin(\pi f_s (n - kT_s))}{\pi f_s (n - kT_s)}.$$
Page 108/255
10.6. THE SAMPLING THEOREM 109
fs > 2B.
In such case, it is possible to perfectly recover the original signal from its samples, using the reconstruction
formula
$$x(t) = \sum_{k=-\infty}^{+\infty} x(kT_s)\, \frac{\sin(\pi f_s (t - kT_s))}{\pi f_s (t - kT_s)}.$$
10.6.3 Illustrations
Exercise 7. Here we want to check the Shannon interpolation formula for correctly sampled signals:
$$x(n) = \sum_{k=-\infty}^{+\infty} x(kT_s)\, \frac{\sin(\pi f_s (n - kT_s))}{\pi f_s (n - kT_s)}.$$
In order to do that, you will first create a sinusoid with frequency f0 (e.g. f0 = 0.05). You will sample
this sine wave at 4 samples per period (fs = 4 f0). Then, you will implement the interpolation formula
and compare the approximation (finite number of terms in the sum) to the initial signal. The numpy
module provides a sinc function, but be aware that the definition used includes the π:
sinc(x) = sin(πx)/(πx).
You have to study, complete the following script and implement the interpolation formula.
# Exercise scaffold: sample a sine at 4 samples per period and reconstruct it
# with the Shannon interpolation formula (to be implemented below).
N = 4000
t = np.arange(N)
fo = 0.05  # --> 1/fo = 20 samples per period
x = sin(2 * pi * fo * t)
ts = np.arange(0, N, 4)  # 5 samples per period
xs = x[::4]  # downsampling, 5 samples per period
num = np.size(ts)  # number of samples
Ts, Fs = 4, 1 / 4
x_rec = zeros(N)  # reconstructed signal
#
# IMPLEMENT HERE THE RECONSTRUCTION FORMULA x_rec = ...
#
# Plotting the results
plt.plot(t, x_rec, label="reconstructed signal")
plt.plot(ts, xs, 'ro', label="Samples")
plt.plot(t, x, '-g', label="Initial Signal")
plt.xlabel("Time")
plt.xlim([100, 200])
plt.legend()
plt.figure()
plt.plot(t, x - x_rec)
plt.title("Reconstruction error")
plt.xlabel("Time")
_ = plt.xlim([100, 200])
# Solution: Shannon interpolation of a sine sampled at 4 samples per... step,
# i.e. 5 samples per period of the sine (1/fo = 20 samples per period).
N = 300
t = np.arange(N)
fo = 0.05  # --> 1/fo = 20 samples per period
x = sin(2 * pi * fo * t)
ts = np.arange(0, N, 4)  # 5 samples per period
num = np.size(ts)  # number of samples
xs = x[::4]  # downsampling, 5 samples per period
Ts, Fs = 4, 1 / 4
x_rec = zeros(N)  # reconstructed signal
for k in range(num):
    # NB: np.sinc(u) = sin(pi*u)/(pi*u) -- the pi is included
    x_rec = x_rec + xs[k] * np.sinc(Fs * (t - k * Ts))
# Plot the reconstruction error on the central part of the interval.
plt.figure()
plt.plot(t, x - x_rec)
plt.title("Reconstruction error")
plt.xlabel("Time")
_ = plt.xlim([100, 200])
We observe that a very small error still exists and, looking carefully at it, that the error is more
important on the edges of the interval.
# Same error, zoomed on the left edge where truncation effects are largest.
plt.figure()
plt.plot(t, x - x_rec)
plt.title("Reconstruction error")
_ = plt.xlim([0, 100])
Actually, there is a duality between the time and frequency domains which implies that
signals with a finite support in one domain have an infinite support in the other.
Consequently, a signal cannot be limited simultaneously in both domains. In the case of our previous
sine wave, when we compute the Discrete-time Fourier transform (3.1), we implicitly suppose that the signal
Page 111/255
112 CHAPTER 10. PERIODIZATION, DISCRETIZATION AND SAMPLING
Page 112/255
10.6. THE SAMPLING THEOREM 113
is zero out of the observation interval. Therefore, its Fourier transform has infinite support and time sampling
will result in (a small) aliasing in the frequency domain.
It thus seems that it is not possible to downsample time-limited discrete signals without (a perhaps very
small) loss. Actually, we will see that this is still possible, using subband coding.
Analysis of the aliasing due to time-limited support.
We first zero-pad the initial signal; - this emphasizes that the signal is time-limited - and enables to look
at what happens at the edges of the support
# Zero-pad the signal (to emphasize that it is time-limited), subsample and
# reconstruct with the Shannon interpolation formula.
bigN = 1000
x_extended = np.zeros(bigN)
x_extended[200:200 + N] = x
#
t = np.arange(0, bigN)
ts = np.arange(0, bigN, 4)
num = np.size(ts)  # number of samples
xs = x_extended[::4]  # downsampling, 5 samples per period
# Reconstruction
Ts, Fs = 4, 1 / 4
x_rec = zeros(bigN)  # reconstructed signal
for n in range(num):
    # NB: np.sinc includes the pi
    x_rec = x_rec + xs[n] * np.sinc(Fs * (t - n * Ts))
# Plotting the results: full view, then zoom on a transition region.
plt.plot(x_extended, label="Initial signal")
plt.plot(t, x_rec, label="Reconstructed signal")
plt.legend()
plt.figure()
plt.plot(x_extended, label="Initial signal")
plt.plot(t, x_rec, label="Reconstructed signal")
plt.xlim([450, 550])
_ = plt.legend()
Page 113/255
114 CHAPTER 10. PERIODIZATION, DISCRETIZATION AND SAMPLING
# Frequency-domain analysis: compare the spectrum of the extended signal with
# the spectrum of its sampled version (factor 4 compensates the kept samples).
xxs = np.zeros(np.size(x_extended))
xxs[::4] = x_extended[::4]
xf = np.abs(fft(x_extended, 4000))
xxsf = 4 * np.abs(fft(xxs, 4000))
f = np.linspace(0, 1, 4000)
# Plotting
plt.plot(f, xf, label="Initial signal")
plt.ylim([0, 40])
_ = plt.xlim([0, 1 / 2])
# plt.plot(f, xxsf, label="Sampled signal")
# Details
plt.figure()
plt.plot(f, xf, label="Initial signal")
plt.plot(f, xxsf, label="Sampled signal")
plt.legend()
plt.ylim([0, 40])
_ = plt.xlim([0, 1 / 4])
We see that - we have infinite support in the frequency domain, the graph of the initial signal shows
that it is not band-limited. - This implies aliasing: the graph of the Fourier transform of the sampled signal
clearly shows that aliasing occurs, which modifies the values below fs /2 = 0.125.
Page 114/255
10.7. LAB ON BASICS IN IMAGE PROCESSING 115
Page 115/255
116 CHAPTER 10. PERIODIZATION, DISCRETIZATION AND SAMPLING
In particular, we will look at the problems of representation and filtering, both in the direct (spatial)
domain and in the transformed (spatial frequencies) domain. Next we will look at the problems of sampling
and filtering.
Within Python, the modules scipy.signal and scipy.ndimage will be useful.
L’objectif de ce laboratoire est de montrer comment les notions que nous avons découvertes dans le cas
monodimensionnel - c’est-à-dire pour les signaux, peuvent être étendues au cas bidimensionnel. Cela permet
également d’avoir un nouveau regard sur ces notions et peut-être contribuer à renforcer leur compréhension.
En particulier, nous examinerons les problèmes de représentation et de filtrage, à la fois dans le do-
maine direct (spatial) et dans le domaine transformé (fréquences spatiales). Ensuite, nous examinerons les
problèmes d’échantillonnage et de filtrage.
Dans Python, les modules scipy.signal etscipy.ndimage seront utiles.
In order to facilitate your learning and work, your servant has prepared a bunch of useful functions,
namely:
imshow(S,cmap=’gray’,origin=’upper’)
You may either display your graphics inline (it is the default) or using external windows; for that call
Page 116/255
11
Digital filters
11.0.1 Introduction
To be continued
Note that poles can occur at z = ∞. Recall that for z = exp( j2π f ), the Z-transform reduces to the Fourier
transform:
H(z = e j2π f ) = H( f ).
Example 2. Examples of rational fractions
117
118 CHAPTER 11. DIGITAL FILTERS
Exercise 8. Give the difference equations corresponding to the previous examples, and compute the in-
verse Z-transforms (impulse responses). In particular, show that for the last transfer function, the impulse
response is rectN (n).
Property 2. For any polynomial with real coefficients, the roots are either reals or appear by complex
conjugate pairs.
Proof. Let
L−1
P(z) = ∑ pk zk .
k=0
If $z_0 = \rho e^{j\theta}$ is a root of the polynomial, then
$$P(z_0) = \sum_{k=0}^{L-1} p_k\, \rho^k e^{jk\theta} = 0,$$
and, since the coefficients $p_k$ are real,
$$P(z_0^*) = \sum_{k=0}^{L-1} p_k\, \rho^k e^{-jk\theta} = P(z_0)^* = 0.$$
This shows that if the coefficients of the transfer function are real, then the zeros and poles are either real
or appear in complex conjugate pairs. This is usually the case, since these coefficients are the coefficients of
the underlying difference equation. For real filters, the difference equation has obviously real coefficients.
For real filters, the zeros and poles are either real or appear in complex conjugate pairs.
Page 118/255
11.1. POLE-ZERO LOCATIONS AND TRANSFER FUNCTIONS BEHAVIOR 119
%m a t p l o t l i b i n l i n e
# Transfer function with a single zero at radius 0.85, frequency 0.2.
poles = np.array([0])
zeros = np.array([0.85 * np.exp(1j * 2 * pi * 0.2)])
A = ZerosPolesDisplay(poles, zeros)
figcaption("Poles-Zeros representation, Transfer function and Impulse response for a single zero",
           label="fig:singlezero")
/usr/local/lib/python3.5/site-packages/matplotlib/tight_layout.py:199: UserWarning:
warnings.warn(’Tight layout not applied. ’
Figure 11.1: Poles-Zeros representation, Transfer function and Impulse response for a single zero
# Single zero closer to the unit circle (0.95), at frequency 0.4.
poles = np.array([0])
zeros = np.array([0.95 * np.exp(1j * 2 * pi * 0.4)])
A = ZerosPolesDisplay(poles, zeros)
figcaption("Poles-Zeros representation, Transfer function and Impulse response for a single zero",
           label="fig:singlezero_2")
/usr/local/lib/python3.5/site-packages/matplotlib/tight_layout.py:199: UserWarning:
warnings.warn(’Tight layout not applied. ’
With two or more zeros, the same kind of observations holds. However, because of the interactions
between the zeros, the minima no longer occur strictly at the frequencies of the zeros but at some nearby
frequencies.
This is illustrated now in the case of two complex-conjugated zeros (which corresponds to a transfer
function with real coefficients).
Page 119/255
120 CHAPTER 11. DIGITAL FILTERS
Figure 11.2: Poles-Zeros representation, Transfer function and Impulse response for a single zero
# Two complex-conjugated zeros -> transfer function with real coefficients.
poles = np.array([0])
zeros = np.array([0.95 * np.exp(1j * 2 * pi * 0.2), 0.95 * np.exp(-1j * 2 * pi * 0.2)])
A = ZerosPolesDisplay(poles, zeros)
figcaption("Poles-Zeros representation, Transfer function and Impulse response for a double zero",
           label="fig:doublezero")
/usr/local/lib/python3.5/site-packages/matplotlib/tight_layout.py:199: UserWarning:
warnings.warn(’Tight layout not applied. ’
# Two conjugate pairs of zeros (frequencies 0.2 and 0.3).
poles = np.array([0])
zeros = np.array([0.95 * np.exp(1j * 2 * pi * 0.2), 0.95 * np.exp(-1j * 2 * pi * 0.2),
                  0.97 * np.exp(1j * 2 * pi * 0.3), 0.97 * np.exp(-1j * 2 * pi * 0.3)])
A = ZerosPolesDisplay(poles, zeros)
figcaption("Poles-Zeros representation, Transfer function and Impulse response for a 4 zeros",
           label="fig:doublezero2")
/usr/local/lib/python3.5/site-packages/matplotlib/tight_layout.py:199: UserWarning:
warnings.warn(’Tight layout not applied. ’
Page 120/255
11.1. POLE-ZERO LOCATIONS AND TRANSFER FUNCTIONS BEHAVIOR 121
Figure 11.3: Poles-Zeros representation, Transfer function and Impulse response for a double zero
Figure 11.4: Poles-Zeros representation, Transfer function and Impulse response for a 4 zeros
Page 121/255
122 CHAPTER 11. DIGITAL FILTERS
we will obviously have the inverse behavior. Instead of an attenuation at a frequency close to the value given
by the angle of the root, we will obtain a resonance (an overshoot). This is illustrated below, in the case of
a single pole, then of multiple poles.
# Single pole at radius 0.85, frequency 0.2 -> resonance near that frequency.
zeros = np.array([0])
poles = np.array([0.85 * np.exp(1j * 2 * pi * 0.2)])
A = ZerosPolesDisplay(poles, zeros)
figcaption("Poles-Zeros representation, Transfer function and Impulse response for a single pole",
           label="fig:singlepole")
/usr/local/lib/python3.5/site-packages/matplotlib/tight_layout.py:199: UserWarning:
warnings.warn(’Tight layout not applied. ’
Figure 11.5: Poles-Zeros representation, Transfer function and Impulse response for a single pole
# Pole closer to the unit circle (0.97) -> sharper resonance.
zeros = np.array([0])
poles = np.array([0.97 * np.exp(1j * 2 * pi * 0.2)])
A = ZerosPolesDisplay(poles, zeros)
figcaption("Poles-Zeros representation, Transfer function and Impulse response for a single pole",
           label="fig:singlepole_2")
/usr/local/lib/python3.5/site-packages/matplotlib/tight_layout.py:199: UserWarning:
warnings.warn(’Tight layout not applied. ’
We can also remark that if the modulus of the pole becomes higher than one, then the impulse response
diverges. The system is no longer stable.
Page 122/255
11.1. POLE-ZERO LOCATIONS AND TRANSFER FUNCTIONS BEHAVIOR 123
Figure 11.6: Poles-Zeros representation, Transfer function and Impulse response for a single pole
# Pole outside the unit circle (1.1) -> diverging impulse response (unstable).
zeros = np.array([0])
poles = np.array([1.1 * np.exp(1j * 2 * pi * 0.2)])
A = ZerosPolesDisplay(poles, zeros)
figcaption("Poles-Zeros representation, Transfer function and Impulse response for a single pole",
           label="fig:singlepole_3")
/usr/local/lib/python3.5/site-packages/matplotlib/tight_layout.py:199: UserWarning:
warnings.warn(’Tight layout not applied. ’
For 4 poles, we get the following: two pairs of resonances, at frequencies essentially given by the
arguments of the poles.
# Two conjugate pairs of poles (frequencies 0.2 and 0.3).
zeros = np.array([0])
poles = np.array([0.95 * np.exp(1j * 2 * pi * 0.2), 0.95 * np.exp(-1j * 2 * pi * 0.2),
                  0.97 * np.exp(1j * 2 * pi * 0.3), 0.97 * np.exp(-1j * 2 * pi * 0.3)])
A = ZerosPolesDisplay(poles, zeros)
figcaption("Poles-Zeros representation, Transfer function and Impulse response for a 4 poles",
           label="fig:doublepoles2")
/usr/local/lib/python3.5/site-packages/matplotlib/tight_layout.py:199: UserWarning:
warnings.warn(’Tight layout not applied. ’
Page 123/255
124 CHAPTER 11. DIGITAL FILTERS
Figure 11.7: Poles-Zeros representation, Transfer function and Impulse response for a single pole
Figure 11.8: Poles-Zeros representation, Transfer function and Impulse response for a 4 poles
Page 124/255
11.1. POLE-ZERO LOCATIONS AND TRANSFER FUNCTIONS BEHAVIOR 125
/usr/local/lib/python3.5/site-packages/matplotlib/tight_layout.py:199: UserWarning:
warnings.warn(’Tight layout not applied. ’
Figure 11.9: Poles-Zeros representation, Transfer function and Impulse response for a 2 poles and 2 zeros
Hence we see that it is possible to understand the behavior of transfer function by studying the location
of their poles and zeros. It is even possible to design transfer function by optimizing the placement of their
poles and zeros.
For instance, given the poles and zeros in the previous example, we immediately find the coefficients of
the filter by computing the corresponding polynomials:
# Recover the filter coefficients from the roots: np.poly builds the
# polynomial whose roots are given.
print("poles", A.poles)
print("zeros", A.zeros)
print("coeffs a: ", np.poly(A.poles))
print("coeffs b: ", np.poly(A.zeros))
Page 125/255
126 CHAPTER 11. DIGITAL FILTERS
In order to further investigate these properties and experiment with the pole and zeros placement, your
servant has prepared a ZerosPolesPlay class. **Enjoy!**
%m a t p l o t l i b
%r u n z e r o s p o l e s p l a y . py
/usr/local/lib/python3.5/site-packages/matplotlib/tight_layout.py:199: UserWarning:
warnings.warn(’Tight layout not applied. ’
# %l o a d z e r o s p o l e s p l a y . py
"""
T r a n s f e r f u n c t i o n a d j u s t m e n t u s i n g z e r o s and p o l e s d r a g and d r o p !
j f b 2015 − l a s t u p d a t e november 2018
"""
i m p o r t numpy a s np
import m a t p l o t l i b . pyplot as p l t
from numpy i m p o r t p i
Page 126/255
11.1. POLE-ZERO LOCATIONS AND TRANSFER FUNCTIONS BEHAVIOR 127
# l i n e , = ax . p l o t ( xs , ys , ’ o ’ , p i c k e r =5 ) # 5 points tolerance
class ZerosPolesPlay () :
def __init__(self,
             poles=np.array([0.7 * np.exp(1j * 2 * np.pi * 0.1)]),
             zeros=np.array([1.27 * np.exp(1j * 2 * np.pi * 0.3)]),
             N=1000,
             response_real=True,
             ymax=1.2,
             Nir=64):
    """Interactive pole/zero editor.

    Parameters
    ----------
    poles, zeros : arrays of complex initial poles/zeros.
    N : number of frequency samples for the transfer function.
    response_real : if True, complete poles/zeros with their complex
        conjugates so that the impulse response is real-valued.
    ymax : minimum radial extent of the polar plot.
    Nir : number of samples of the displayed impulse response.
    """
    if response_real:
        # sym_comp returns the conjugate-completed set and a boolean
        # mask of which entries are (numerically) real
        self.poles, self.poles_isreal = self.sym_comp(poles)
        self.zeros, self.zeros_isreal = self.sym_comp(zeros)
    else:
        self.poles = poles
        self.poles_isreal = (np.abs(np.imag(poles)) < 1e-12)
        self.zeros = zeros
        self.zeros_isreal = (np.abs(np.imag(zeros)) < 1e-12)
    # radial limit: at least `ymax`, and 20% above the largest modulus
    self.ymax = np.max([
        ymax, 1.2 * np.max(np.concatenate((np.abs(poles), np.abs(zeros))))
    ])
    # polar coordinates (angle, radius) used by the draggable plot
    self.poles_th = np.angle(self.poles)
    self.poles_r = np.abs(self.poles)
    self.zeros_th = np.angle(self.zeros)
    self.zeros_r = np.abs(self.zeros)
    self.N = N
    self.Nir = Nir
    self.response_real = response_real
    # drag-and-drop state (set by the pick/release callbacks)
    self.being_dragged = None
    self.nature_dragged = None
    self.poles_line = None
    self.zeros_line = None
    self.setup_main_screen()
    self.connect()
    self.update()
def setup_main_screen ( s e l f ) :
# NOTE(review): the PDF extraction dropped the beginning of this method's
# body (creation of the figure `self.fig`, the gridspec `gs` and the polar
# axis `self.ax` referenced below); the text resumes mid-call at `y=0.98`.
# Tokens below are kept byte-identical to the (OCR-garbled) original.
Page 127/255
128 CHAPTER 11. DIGITAL FILTERS
y =0.98 ,
horizontalalignment=’ left ’ )
# s e l f . ax . s e t _ t i t l e ( ’ P o l e s & z e r o s a d j u s t m e n t ’ , f o n t s i z e =16 , c o l o r = ’
blue ’)
s e l f . ax . s e t _ y l i m ( [ 0 , s e l f . ymax ] )
# Draggable markers for the poles (blue circles) and zeros (red diamonds),
# plotted in polar coordinates (angle, radius); picker=5 enables pick events.
s e l f . p o l e s _ l i n e , = s e l f . ax . p l o t (
s e l f . p o l e s _ t h , s e l f . p o l e s _ r , ’ ob ’ , ms =9 , p i c k e r =5 , l a b e l = " P o l e s "
)
s e l f . z e r o s _ l i n e , = s e l f . ax . p l o t (
s e l f . z e r o s _ t h , s e l f . z e r o s _ r , ’ Dr ’ , ms =9 , p i c k e r =5 , l a b e l = " Z e r o s "
)
# dashed circle of radius 1 (the unit circle) as a stability landmark
s e l f . ax . p l o t (
np . l i n s p a c e (−np . p i , np . p i , 5 0 0 ) , np . o n e s ( 5 0 0 ) , ’−−b ’ , lw = 1)
s e l f . ax . l e g e n d ( l o c = 1 )
# Transfer function
# s e l f . f i g T F , s e l f . axTF = p l t . s u b p l o t s ( 2 , s h a r e x = T r u e )
# s e l f . axTF0= s e l f . f i g . a d d _ s u b p l o t ( 2 2 2 , f a c e c o l o r = ’ L i g h t Y e l l o w ’ )
s e l f . axTF0 = p l t . s u b p l o t ( g s [ 0 , 6 : 1 1 ] , f a c e c o l o r = ’ L i g h t Y e l l o w ’ )
# s e l f . axTF [ 0 ] . s e t _ a x i s _ b g c o l o r ( ’ L i g h t Y e l l o w ’ )
s e l f . axTF0 . s e t _ t i t l e ( ’ T r a n s f e r f u n c t i o n ( modulus ) ’ )
# s e l f . axTF1= s e l f . f i g . a d d _ s u b p l o t ( 2 2 4 , f a c e c o l o r = ’ L i g h t Y e l l o w ’ )
s e l f . axTF1 = p l t . s u b p l o t ( g s [ 1 , 6 : 1 1 ] , f a c e c o l o r = ’ L i g h t Y e l l o w ’ )
s e l f . axTF1 . s e t _ t i t l e ( ’ T r a n s f e r f u n c t i o n ( p h a s e ) ’ )
s e l f . axTF1 . s e t _ x l a b e l ( ’ F r e q u e n c y ’ )
# H(f) = FFT(poly(zeros)) / FFT(poly(poles)) on N frequency samples
f = np . l i n s p a c e ( 0 , 1 , s e l f . N)
s e l f . TF = np . f f t . f f t ( np . p o l y ( s e l f . z e r o s ) , s e l f . N) / np . f f t . f f t (
np . p o l y ( s e l f . p o l e s ) , s e l f . N)
s e l f . TF_m_line , = s e l f . axTF0 . p l o t ( f , np . a b s ( s e l f . TF ) )
s e l f . T F _ p _ l i n e , = s e l f . axTF1 . p l o t ( f , 180 / np . p i ∗ np . a n g l e ( s e l f . TF )
)
# s e l f . f i g T F . c a n v a s . draw ( )
# Impulse response
# self . figIR = plt . figure ()
# s e l f . axIR = s e l f . f i g . a d d _ s u b p l o t ( 2 2 3 , f a c e c o l o r = ’ L a v e n d e r ’ )
s e l f . axIR = p l t . s u b p l o t ( g s [ 2 , 6 : 1 1 ] , f a c e c o l o r = ’ L a v e n d e r ’ )
s e l f . IR = s e l f . impz ( s e l f . z e r o s , s e l f . p o l e s ,
s e l f . N i r ) # np . r e a l ( np . f f t . i f f t ( s e l f . TF ) )
s e l f . axIR . s e t _ t i t l e ( ’ I m p u l s e r e s p o n s e ’ )
s e l f . axIR . s e t _ x l a b e l ( ’ Time ’ )
s e l f . I R _ m _ l i n e , = s e l f . axIR . p l o t ( s e l f . IR )
# s e l f . f i g I R . c a n v a s . draw ( )
s e l f . f i g . c a n v a s . draw ( )
self . fig . tight_layout ()
def impz(self, zeros, poles, L):
    """Return the first L samples of the impulse response of the filter
    whose transfer function has the given `zeros` and `poles`."""
    from scipy.signal import lfilter
    # filter coefficients are the polynomials with the given roots
    a = np.poly(poles)
    b = np.poly(zeros)
    # filter a unit impulse of length L
    d = np.zeros(L)
    d[0] = 1
    return lfilter(b, a, d)
def sym_comp(self, p):
    """Complete the set of roots `p` with complex conjugates.

    Returns
    -------
    out : array ordered as [complex roots, real roots, conjugates of the
        complex roots in reversed order], so that out[-k-1] is the
        conjugate of out[k].
    isreal : boolean mask, True where the root is (numerically) real.
    """
    # removed unused local `L = np.size(p)` from the original
    r = list()  # real roots (|imag| below tolerance)
    c = list()  # complex roots
    for z in p:
        if np.abs(np.imag(z)) < 1e-12:
            r.append(z)
        else:
            c.append(z)
    out = np.concatenate((c, r, np.conjugate(c[::-1])))
    isreal = (np.abs(np.imag(out)) < 1e-12)
    return out, isreal
#sym_comp([1+1j, 2, 3-2j])
def connect(self):
    """Wire the matplotlib canvas events used for drag-and-drop editing."""
    canvas = self.fig.canvas
    self.cidpick = canvas.mpl_connect('pick_event', self.on_pick)
    self.cidrelease = canvas.mpl_connect('button_release_event',
                                         self.on_release)
    self.cidmotion = canvas.mpl_connect('motion_notify_event',
                                        self.on_motion)
def update(self):
    """Recompute transfer function and impulse response from the current
    poles/zeros, and refresh the corresponding plots."""
    # poles and zeros -> transfer function on N frequency samples
    f = np.linspace(0, 1, self.N)
    self.TF = np.fft.fft(np.poly(self.zeros), self.N) / np.fft.fft(
        np.poly(self.poles), self.N)
    self.TF_m_line.set_ydata(np.abs(self.TF))
    # rescale the modulus axis when the curve leaves (or shrinks well
    # inside) the current limits
    M = np.max(np.abs(self.TF))
    current_ylim = self.axTF0.get_ylim()[1]
    if M > current_ylim or M < 0.5 * current_ylim:
        self.axTF0.set_ylim([0, 1.2 * M])
    # phase, in degrees
    self.TF_p_line.set_ydata(180 / np.pi * np.angle(self.TF))
    # Impulse response
    self.IR = self.impz(self.zeros, self.poles, self.Nir)
    self.IR_m_line.set_ydata(self.IR)
    # same adaptive rescaling for the impulse-response axis (both ends)
    M = np.max(self.IR)
    Mm = np.min(self.IR)
    current_ylim = self.axIR.get_ylim()
    update_ylim = False
    if M > current_ylim[1] or M < 0.5 * current_ylim[1]:
        update_ylim = True
    if Mm < current_ylim[0] or np.abs(Mm) > 0.5 * np.abs(current_ylim[0]):
        update_ylim = True
    if update_ylim:
        self.axIR.set_ylim([Mm, 1.2 * M])
    self.fig.canvas.draw()
def on_motion(self, event):
    """Move the selected points and update the graphs."""
    if event.inaxes != self.ax:
        return
    if self.being_dragged is None:
        return
    p = self.being_dragged  # index of points on the line being dragged
    xd = event.xdata
    yd = event.ydata
    if self.nature_dragged == self.poles_line:
        x, y = self.poles_line.get_data()
        if not self.poles_isreal[p]:
            x[p], y[p] = xd, yd
        else:
            # real roots are constrained to the real axis (angle 0 or pi)
            if np.pi / 2 < xd < 3 * np.pi / 2:
                x[p], y[p] = np.pi, yd
            else:
                x[p], y[p] = 0, yd
        # mirror the symmetric (conjugate) point
        x[-p - 1], y[-p - 1] = -x[p], y[p]
        self.poles_line.set_data(x, y)  # then update the line
        self.poles[p] = y[p] * np.exp(1j * x[p])
        self.poles[-p - 1] = y[p] * np.exp(-1j * x[p])
    else:
        x, y = self.zeros_line.get_data()
        if not self.zeros_isreal[p]:
            x[p], y[p] = xd, yd
        else:
            if np.pi / 2 < xd < 3 * np.pi / 2:
                x[p], y[p] = np.pi, yd
            else:
                x[p], y[p] = 0, yd
        x[-p - 1], y[-p - 1] = -x[p], y[p]
        self.zeros_line.set_data(x, y)  # then update the line
        self.zeros[p] = y[p] * np.exp(1j * x[p])
        self.zeros[-p - 1] = y[p] * np.exp(-1j * x[p])
    self.update()  # and the plot
s e l f . b e i n g _ d r a g g e d = None
s e l f . n a t u r e _ d r a g g e d = None
Page 130/255
11.2. SYNTHESIS OF FIR FILTERS 131
s e l f . update ( )
# case of complex poles and zeros
poles = np.array(
    [0.8 * np.exp(1j * 2 * pi * 0.125), 0.8 * np.exp(1j * 2 * pi * 0.15),
     0.5])
zeros = np.array(
    [0.95 * np.exp(1j * 2 * pi * 0.175), 1.4 * np.exp(1j * 2 * pi * 0.3),
     0])
A = ZerosPolesPlay(poles, zeros)
"""
# case of a single real pole
poles = np.array([0.5])
zeros = np.array([0])
A = ZerosPolesPlay(poles, zeros, response_real=False)
"""
plt.show()
/usr/local/lib/python3.5/site-packages/matplotlib/tight_layout.py:199: UserWarning:
warnings.warn(’Tight layout not applied. ’
Page 131/255
132 CHAPTER 11. DIGITAL FILTERS
which links L samples in the frequency domain to L samples in the time domain. Hence, what we need to
do is simply to sample the frequency response on the required number of samples, and then to compute the
associated impulse response by inverse DFT. This is really simple.
%m a t p l o t l i b i n l i n e
L = 21
#ideal f i l t e r
fc = 0.1
N = 20 ∗ L
M = i n t ( np . r o u n d (N ∗ f c ) )
r = np . z e r o s (N)
r [ 0 :M] = 1
r [−1:−M: −1] = 1
p l t . p l o t ( np . a r a n g e ( 0 , N) / N, ( r ) )
# sampling the i d e a l f i l t e r
# we want a t o t a l o f L s a m p l e s ; t h e n s t e p =N / / L ( i n t e g e r d i v i s i o n )
step = N / / L
rs = r [ : : step ]
p l t . p l o t ( np . a r a n g e ( 0 , N, s t e p ) / N, ( r s ) , ’ og ’ )
_ = p l t . ylim ( [ 0 , 1 . 1 ] )
_ = p l t . xlim ( [ 0 , 1 ] )
p l t . show ( )
The associated impulse response is given by the inverse DFT. It is represented on figure 11.10.
%p r e c i s i o n 3
# The i m p u l s e r e s p o n s e :
h = real ( i f f t ( rs ) )
p r i n t ( " Impulse response h : " , h )
p l t . stem ( h )
p l t . t i t l e ( " Impulse response " )
figcaption (
" I m p u l s e r e s p o n s e o b t a i n e d by f r e q u e n c y s a m p l i n g " , l a b e l = " f i g : h _ s a m p f r e q
")
Page 132/255
11.2. SYNTHESIS OF FIR FILTERS 133
Impulse response h: [ 0.238 0.217 0.161 0.086 0.013 -0.039 -0.059 -0.048 -0.015
0.044 0.044 0.021 -0.015 -0.048 -0.059 -0.039 0.013 0.086 0.161
0.217]
This impulse response is periodic, because of the implicit periodicity of sequences after use of a DFT
operation. The “true” response is symmetric around n = 0. We can display it using a fftshift.
# delay of L/2 if L is even, (L-1)/2 otherwise
delay = (L - 1) / 2 if L % 2 else L / 2
_ = plt.plot(np.arange(0, L) - delay, fftshift(h))
It is very instructive to look at the frequency response which is effectively realized. In other words we
must look at what happens between the points. For that, we approximate the discrete time Fourier transform
by zero-padding. At this point, it is really important to shift the impulse response because the zero-padding
corresponds to an implicit truncation on L points of the periodic sequence, and we want to keep the true
impulse response. This operation introduces a delay of L/2 if L is even and (L − 1)/2 otherwise.
NN = 1000
# ## <-- Here it is really important to introduce a fftshift
# ## otherwise, the sequence has large transitions
# ## on the boundaries
H = fft(fftshift(h), NN)
Then we display this frequency response and compare it to the ideal filter and to the frequency samples.
# ideal filter
plt.plot(np.arange(0, N) / N, (r))
# sampling the ideal filter
plt.plot(np.arange(0, N, step) / N, (rs), 'og')
_ = plt.ylim([0, 1.1])
_ = plt.xlim([0, 1])
# realized filter
_ = plt.plot(np.arange(0, NN) / NN, np.abs(H))
_ = plt.ylim([0, 1.1 * np.max(np.abs(H))])
Once we have done all this, we can group all the code into a function and experiment with the parameters,
using the interactive facilities of IPython notebook widgets.
# mpld3.disable_notebook()
def LP_synth_fsampling(fc=0.2, L=20, plot_impresp=False):
    """Synthesize a low-pass FIR filter of L taps by frequency sampling.

    Parameters
    ----------
    fc : cut-off frequency (normalized, in [0, 0.5]).
    L : number of taps (frequency samples kept).
    plot_impresp : if True, also plot the impulse response.
    """
    # ideal filter on a fine grid of N points (real filter: the band is
    # mirrored near frequency 1)
    N = 20 * L
    M = int(np.round(N * fc))
    r = np.zeros(N)
    r[0:M] = 1
    r[-1:-M:-1] = 1
    # sampling the ideal filter
    # we want a total of L samples; then step=N//L (integer division)
    step = N // L
    rs = r[::step]
    # clear_output(wait=True)
    # The impulse response:
    h = real(ifft(rs))
    if plot_impresp:
        plt.figure()
        # NOTE: the original notebook cell had the IPython magic
        # `%precision 3` here; it is display-only and is a syntax error in
        # plain Python, so it has been removed.
        plt.plot(h)
        plt.title("Impulse response")
        plt.figure()
    NN = 1000
    H = fft(fftshift(h), NN)
    # ideal filter
    plt.plot(np.arange(0, N) / N, (r))
    # sampling the ideal filter
    plt.plot(np.arange(0, N, step) / N, (rs), 'og')
    plt.xlabel("Frequency")
    _ = plt.xlim([0, 1])
    # realized filter
    _ = plt.plot(np.arange(0, NN) / NN, np.abs(H))
    _ = plt.ylim([0, 1.1 * np.max(np.abs(H))])
# interactive exploration of the frequency-sampling synthesis
_ = interact(LP_synth_fsampling,
             fc=widgets.FloatSlider(min=0, max=1, step=0.01, value=0.2),
             L=widgets.IntSlider(min=1, max=200, value=10),
             plot_impresp=False)
This is a variation on the interactive widgets example, where we do not use the interact function but
rather directly the Jupyter widgets.
from ipywidgets import widgets
# capture area for the plots produced by the widget callbacks
out = widgets.Output()
Page 135/255
136 CHAPTER 11. DIGITAL FILTERS
@out.capture(clear_output=True, wait=True)
def wLP_synth_fsampling():
    """Read the widget values and redraw the frequency-sampling synthesis."""
    fc = fcw.value
    L = Lw.value
    plot_impresp = imprespw.value
    LP_synth_fsampling(fc, L, plot_impresp)
    plt.show()
# arrange the sliders and the output area
c = widgets.HBox(children=[fcw, Lw])
# d = widgets.VBox(children=[c, imprespw])
d = widgets.VBox(children=[fcw, Lw, imprespw])
d.align = "center"
d.box_style = "info"
d.layout = Layout(width='40%', align_items='baseline', border_radius=50)
display(widgets.VBox([d, out]))
H( f ) → h(n).
• Of course, this step would require by hand calculations, or a symbolic computation system. This leads
to many exercises for students in traditional signal processing.
• In practice, one often begins with a precise numerical representation of the ideal filter and obtain the
impulse response by IDFT. In this sense, the method is linked with synthesis by frequency sampling
seen above.
If we begin with a transfer function which is only specified in magnitude, and if we choose to consider it
as purely real, then the impulse response is even, thus non-causal. Furthermore, when the transfer function
is band-limited, then its inverse transform has infinite duration. This is a consequence of the uncertainty
principle for the Fourier transform. Hence, we face two problems: 1. the impulse response is non-causal, 2.
it has infinite support.
Page 136/255
11.2. SYNTHESIS OF FIR FILTERS 137
A simple illustration is the following. If we consider an ideal low-pass filter, with cut-off frequency fc ,
then its inverse Fourier transform is a cardinal sine
fc = 0.1; N = 60; n = np.arange(-N, N, 0.1)
# inverse Fourier transform of the ideal low-pass: a cardinal sine
plt.plot(n, 2 * fc * np.sinc(2 * fc * n))
_ = plt.title("Impulse response for an ideal low-pass with $f_c={}$".format(fc))
In order to get a finite number of points for our filter, we have no other solution but truncate the impulse
response. Beware that one (you) need to keep both the positive and negative indexes. To get a causal system,
it then suffices to shift the impulse response by the length of the non causal part. In the case of our ideal
low-pass filter, this gives:
# L: number of points of the impulse response (odd)
L = 21
M = (L - 1) // 2
fc = 0.2
N = 40
step = 0.1
invstep = int(1 / step)
n = np.arange(-N, N, step)
# ideal low-pass impulse response (cardinal sine)
h = 2 * fc * np.sinc(2 * fc * n)
plt.plot(n, h)
# rectangular window keeping indexes |n| < M (positive AND negative sides)
w = np.zeros(np.shape(n))
w[where(abs(n * invstep) < M * invstep)] = 1
plt.plot(n, 2 * fc * w, '--r')
# truncated (windowed) impulse response
ir_w = np.zeros(np.shape(n))
ir_w[where(abs(n * invstep) < M * invstep)] = h[where(
    abs(n * invstep) < M * invstep)]
# plt.figure();
_ = plt.plot(n, ir_w)
Then the realized transfer function can be computed and compared with the ideal filter.
Page 137/255
138 CHAPTER 11. DIGITAL FILTERS
# transfer function actually realized by the truncated impulse response
H_w = fft(ir_w[::invstep], 1000)
f = np.linspace(0, 1, 1000)
plt.plot(f, np.abs(H_w), label="Realized filter")
plt.plot(
    [0, fc, fc, 1 - fc, 1 - fc, 1], [1, 1, 0, 0, 1, 1], label="Ideal filter")
_ = plt.legend(loc='best')
We observe that the frequency response presents ripples in both the band-pass and the stop-band. Be-
sides, the transition bandwidth, from the band-pass to the stop-band is large. Again, we can put all the
previous commands in the form of a function, and experiment interactively with the parameters.
def LP_synth_window(fc=0.2, L=21, plot_impresp=False):
    """Synthesize a low-pass FIR filter by rectangular truncation of the
    ideal impulse response, and plot the realized transfer function.

    fc : normalized cut-off frequency; L : odd number of taps;
    plot_impresp : also plot the windowed impulse response.
    """
    # L: number of points of the impulse response (odd)
    M = (L - 1) // 2
    step = 0.1
    invstep = int(1 / step)
    n = np.arange(-M - 5, M + 5, step)
    h = 2 * fc * np.sinc(2 * fc * n)
    # rectangular window keeping |n| < M
    w = np.zeros(np.shape(n))
    w[where(abs(n * invstep) < M * invstep)] = 1
    ir_w = np.zeros(np.shape(n))
    ir_w[where(abs(n * invstep) < M * invstep)] = h[where(
        abs(n * invstep) < M * invstep)]
    # plt.figure();
    if plot_impresp:
        plt.figure()
        plt.plot(n, w, '--r')
        _ = plt.plot(n, ir_w)
        plt.figure()
    H_w = fft(ir_w[::invstep], 1000)
    f = np.linspace(0, 1, 1000)
    plt.plot(f, np.abs(H_w), label="Realized filter")
    plt.plot(
        [0, fc, fc, 1 - fc, 1 - fc, 1], [1, 1, 0, 0, 1, 1],
        label="Ideal filter")
    plt.legend(loc='best')
    # return ir_w
# r e t u r n ir_w
# interactive exploration of the truncation (window) synthesis
_ = interact(
    LP_synth_window,
    fc=widgets.FloatSlider(min=0, max=0.49, step=0.01, value=0.2),
    L=widgets.IntSlider(min=1, max=200, value=10),
    plot_impresp=False)
Page 139/255
140 CHAPTER 11. DIGITAL FILTERS
We observe that the transition bandwidth varies with the number of points kept in the impulse response,
and that the larger the number of points, the thinner the transition. We also observe that though the ripples
oscillations have higher frequency, their amplitude does not change with the number of points.
There is a simple explanation for these observations, as well as directions for improvement. Instead of
the rectangular truncating as above, it is possible to consider more general weight functions, say w(n) of
length N. The true impulse response is thus apodized (literal translation: “removing the foot”) by multipli-
cation with the window function:
hw (n) = h(n)w(n).
By the Plancherel theorem, we immediately get that
Hw (h) = [H ∗W ]( f ).
The resulting filter is thus the convolution of the ideal response with the Fourier transform of the window
function.
In the example above, the window function is rectangular. As is now well known, its Fourier transform is
a discrete cardinal sine (a ratio of two sines)
$$W(f) = \frac{\sin\left(\pi f (2L+1)\right)}{(2L+1)\,\sin\left(\pi f\right)}.$$
Hence, the realized filter results from the convolution between the rectangle representing the ideal low-pass
with a cardinal sine. This yields that the transition bandwidth is essentially given by the integral of the main
lobe of the cardinal sine, and that the amplitude of the ripples are due to the integrals of the sidelobes. In
order to improve the synthesized filter, we can adjust the number of taps of the impulse response and/or
choose another weight window.
Many window functions have been proposed and are used for the design of FIR filters. These windows
are also very useful in spectrum analysis where the same kind of problems – width of a main lobe, ripples
due to the side-lobes, are encountered. A series of windows is presented in the following table. Many other
windows exist, and entire books are devoted to their characterization.
Page 140/255
11.2. SYNTHESIS OF FIR FILTERS 141
with [a0 , a1 , a2 , a3 ] = [0.35875, 0.48829, 0.14128, 0.01168]. The Kaiser-Bessel window function also per-
forms very well. Its expression is
$$w(n) = I_0\!\left(\beta \sqrt{1 - \frac{4n^2}{(M-1)^2}}\right) \Big/ \, I_0(\beta),$$
where I0 is the modified zeroth-order Bessel function. The shape parameter β determines a trade-off
between main-lobe width and side lobe level. As β gets large, the window narrows.
See the detailed table in the book “Window Functions and Their Applications in Signal Processing” by
Prabhu (2013). We have designed a displaying and comparison tool for the window functions. The listing
is provided in the appendix, but for now, readers of the IPython notebook version can experiment a bit by
issuing the command %run windows_disp.ipy.
" " " T h i s i s from s c i p y . s i g n a l . get_window ( ) h e l p
L i s t o f windows :
b o x c a r , t r i a n g , blackman , hamming , hann , b a r t l e t t , f l a t t o p ,
p a r z e n , bohman , b l a c k m a n h a r r i s , n u t t a l l , b a r t h a n n ,
k a i s e r ( needs beta ) , gaussian ( needs s t d ) ,
g e n e r a l _ g a u s s i a n ( n e e d s power , w i d t h ) ,
s l e p i a n ( needs width ) , chebwin ( needs a t t e n u a t i o n ) " " "
windows = [ ’ b o x c a r ’ , ’ t r i a n g ’ , ’ b l a c k m a n ’ , ’ hamming ’ , ’ hann ’ , ’ b a r t l e t t ’ , ’
flattop ’ ,
’ p a r z e n ’ , ’ bohman ’ , ’ b l a c k m a n h a r r i s ’ , ’ n u t t a l l ’ , ’ b a r t h a n n ’ ]
windows_1parameter =[ ’ k a i s e r ’ , ’ g a u s s i a n ’ , ’ s l e p i a n ’ , ’ chebwin ’ ]
windows_2parameter =[ ’ g e n e r a l _ g a u s s i a n ’ ]
%r u n w i n d o w s _ d i s p _ j u p . i p y
The main observation is that with N fixed, we have a trade-off to find between the width of the main
lobe, thus of the transition width, and the amplitude of the side-lobes. The choice is usually done on a case
by case basis, which may also include other parameters. To sum it all up, the window method consists in:
• calculate (or approximate) the impulse response associated with an ideal impulse response,
• choose a number of samples, and a window function, and apodize the impulse response. The choice
of the number of points an window function can also be motivated by maximum level of ripples in the
band pass and/or in the stop band.
• shift the resulting impulse response by half the number of samples in order to obtain a causal filter.
It is quite simple to adapt the previous script with the rectangular window to accept more general win-
dows. This is done by adding a parameter window.
def LP_synth_genwindow(fc=0.2,
                       L=21,
                       window='boxcar',
                       plot_impresp=False,
                       plot_transferfunc=True):
    """Synthesize a low-pass FIR filter by the window (apodization) method.

    Parameters
    ----------
    fc : normalized cut-off frequency.
    L : number of taps (odd); the response covers n = -M..M, M=(L-1)//2.
    window : window name accepted by `scipy.signal.get_window`.
    plot_impresp : plot window / initial / windowed impulse responses.
    plot_transferfunc : plot realized vs ideal transfer function.

    Returns
    -------
    ir_w : the windowed impulse response (length 2*M+1).
    """
    # L: number of points of the impulse response (odd)
    M = (L - 1) // 2
    step = 1
    invstep = int(1 / step)
    n = np.arange(-M, M + 1, step)
    # ideal low-pass impulse response (cardinal sine)
    h = 2 * fc * np.sinc(2 * fc * n)
    # apodization window, then windowed impulse response
    w = sig.get_window(window, 2 * M + 1)
    ir_w = w * h
    if plot_impresp:
        plt.figure()
        plt.plot(n, w, '--r', label="Window function")
        _ = plt.plot(n, h, label="Initial impulse response")
        _ = plt.plot(n, ir_w, label="Windowed impulse response")
        plt.legend()
    if plot_transferfunc:
        # transfer function is only needed for display, so it is now
        # computed on demand (the original computed it unconditionally);
        # np.fft.fft used for consistency with the rest of the file
        H_w = np.fft.fft(ir_w[::invstep], 1000)
        plt.figure()
        f = np.linspace(0, 1, 1000)
        plt.plot(f, np.abs(H_w), label="Realized filter")
        plt.plot(
            [0, fc, fc, 1 - fc, 1 - fc, 1], [1, 1, 0, 0, 1, 1],
            label="Ideal filter")
        plt.legend(loc='best')
    return ir_w
# interactive exploration of the generalized window synthesis
w = interactive(
    LP_synth_genwindow,
    fc=widgets.FloatSlider(min=0, max=0.49, step=0.01, value=0.2),
    L=widgets.IntSlider(min=1, max=200, value=10),
    window=widgets.Dropdown(options=windows),
    plot_impresp=False,
    plot_transferfunc=True)
w
Exercise 9. The function LP_synth_genwindow returns the impulse response of the synthetized filter.
Create a signal xtest = sin(2π f0 n) + sin(2π f1 n) + sin(2π f2 n), with f0 = 0.14, f1 = 0.24, f2 = 0.34 and filter
this signal with the synthetized filter, for fc = 0.2, L = 50, and for a hamming window. Comment on the
results.
# define constants
n = np.arange(0, 100)
f0, f1, f2 = 0.14, 0.24, 0.34
# the test signal
xtest = 0  # Complete here
plt.plot(xtest)
plt.title("Initial signal")
# compute the filter
# h1 = LP_synth_genwindow(
#     Complete here
# )
# then filter the signal
y1 = 0  # Complete here
# and display it
plt.figure()
plt.plot(y1)
plt.title("Filtered signal")
Solution
# define constants
n = np.arange(0, 100)
f0, f1, f2 = 0.14, 0.24, 0.34
# the test signal: two sines below fc=0.2 pass, the cosine at f2 is cut
xtest = sin(2 * pi * f0 * n) + sin(2 * pi * f1 * n) + cos(2 * pi * f2 * n)
plt.plot(xtest)
plt.title("Initial signal")
# compute the filter
h1 = LP_synth_genwindow(
    fc=0.2,
    L=50,
    window='hamming',
    plot_impresp=False,
    plot_transferfunc=False)
# then filter the signal
y1 = sig.lfilter(h1, [1], xtest)
# and display it
plt.figure()
plt.plot(y1)
plt.title("Filtered signal")
The whole synthesis workflow for the window method is available in two specialized functions of the
scipy library. Nowadays, it is really useless to redevelop existing programs. It is much more interesting to
gain insights on what is really done and how things work. This is actually the goal of this lecture. The two
functions avalaible in scipy.signal are firwin and firwin2.
Exercise 10. Use one of these functions to design a high-pass filter with cut-off frequency at fc = 0.3. Filter
the preceding signal xtest and display the results.
# define constants
n = np.arange(0, 200)
f0, f1, f2 = 0.14, 0.2, 0.34
# the test signal
Page 144/255
11.2. SYNTHESIS OF FIR FILTERS 145
Page 145/255
146 CHAPTER 11. DIGITAL FILTERS
Page 146/255
11.3. SYNTHESIS OF IIR FILTERS BY THE BILINEAR TRANSFORMATION METHOD 147
Page 147/255
148 CHAPTER 11. DIGITAL FILTERS
for the design of analog filters, such as Butterworth, Chebyshev, or elliptic filter designs. Then, the idea
is to map the digital filter into an equivalent analog filter, which can be designed optimally, and map back
the design to the digital domain. The key for this procedure is to dispose of a reversible mapping from the
analog domain to the digital domain.
1 − z−1
p=k , (11.3)
1 + z−1
where k is an arbitrary constant. The usual derivation leads to k = 2/Ts , where Ts is the sampling period.
However, using a general parameter k does not change the methodology and offers a free parameter that
enables to simplify the procedure.
The point is that this transform presents some interesting and useful features:
1. It preserves stability and minimum phase property (the zeros of the transfer function are with negative
real part (analog case) or are inside the unit circle (discrete case).
2. It maps the infinite analog axis into a periodic frequency axis in the frequency domain for discrete
signals. That mapping is highly non linear and warp the frequency components, but it recovers the
well-known property of periodicity of the Fourier transform of discrete signals.
The corresponding mapping of frequencies is obtained as follows. Letting $p = j\omega_a = j2\pi f_a$ and $z =
\exp(j\omega_d) = \exp(j2\pi f_d)$, and plugging this in (11.3), we readily obtain
$$\omega_a = k \tan\left(\frac{\omega_d}{2}\right), \qquad (11.4)$$
or
$$\omega_d = 2 \arctan\left(\frac{\omega_a}{k}\right). \qquad (11.5)$$
The transformation (11.4) corresponds to the initial transform of the specifications in the digital domain
into analog domain specifications. It is often called a pre-warping . Figure 11.14 shows the mapping of
pulsations from one domain into the other one.
k = 2
xmin = -5 * pi
xmax = -xmin
omegaa = np.arange(xmin, xmax, 0.1)
# bilinear frequency mapping: omega_d = 2 arctan(omega_a / k)
omegad = 2 * np.arctan(omegaa / k)
plt.plot(omegaa, omegad)
# asymptotes at +/- pi (periodicity bound of the digital axis)
plt.plot([xmin, xmax], [-pi, -pi], '--', color='lightblue')
plt.plot([xmin, xmax], [pi, pi], '--', color='lightblue')
# plt.text(-3.7, 0.4, 'Fs/2', color='blue', fontsize=14)
plt.xlabel("Analog pulsations $\omega_a$")
plt.ylabel("Digital pulsations $\omega_d$")
_ = plt.xlim([xmin, xmax])
plt.title("Frequency mapping of the bilinear transform")
figcaption("Frequency mapping of the bilinear transform", label="fig:BLT")
When designing a digital filter using an analog approximation and the bilinear transform, we follow
these steps: Pre-warp the cutoff frequencies Design the necessary analog filter apply the bilinear transform
to the transfer function Normalize the resultant transfer function to be monotonic and have a unity passband
gain (0dB).
1. For the synthesis of the analog filter, it is convenient to work with a normalized filter such that Ω p = 1.
Therefore, as a first step, we set
k = 1/ tan(ω p /2)
which ensures that Ω p = 1. Then, we compute Ωs = k tan (ωs /2).
2. Synthetize the optimum filter in the analog domain, given the type of filter, the frequency and gain
constraints. This usually consists in determining the order of the filter such that the gain constraints
(ripples, minimum attenuation, etc) are satisfied, and then select the corresponding polynomial. This
yields a transfer function Ha (p).
3. Map back the results to the digital domain, using the bilinear transform (11.3), that is compute
Page 149/255
150 CHAPTER 11. DIGITAL FILTERS
Exercise 11. We want to synthetize a digital filter with f p = 6kHz, with a maximum attenuation of -3dB,
and a stop-band frequency of fs = 9kHz, with a minimum attenuation of -9dB. The Nyquist rate (sampling
frequency) is Fs = 36kHz.
• if we choose a Butterworth filter, the best order is n = 2 and the corresponding polynomial is
$D(p) = p^2 + \sqrt{2}\,p + 1$, and the transfer function is $H_a(p) = 1/D(p)$. Compute H(z).
• Plot H( f ). Use sig.freqz for computing the transfer function. We also provide a function
plt_LPtemplate(omega, A, Abounds=None) which displays the template of the filter. Im-
port it using from plt_LPtemplate import *.
Elements of solution
• $k = 1/\tan(\pi/6) = \sqrt{3}$
• $\Omega_s = k \tan(\pi/4) = \sqrt{3}$
•
$$H(z) = \frac{1 + 2z^{-1} + z^{-2}}{(4+\sqrt{6}) - 4z^{-1} + (4-\sqrt{6})z^{-2}}$$
# compute the transfer function using freqz
w, H = sig.freqz([1, 2, 1], [4 + sqrt(6), -4, 4 - sqrt(6)], whole=True)
# plot the result -- w-pi corresponds to a shift of the pulsation
# axis associated with the fftshift of the transfer function.
plt.plot(w - pi, 20 * np.log10(fftshift(np.abs(H))))
# plot the filter template
from plt_LPtemplate import *
plt_LPtemplate([pi / 3, pi / 2], [-3, -9], Abounds=[5, -35])
plt.title("Realized filter and its template")
figcaption("Realized filter and its template")
/home/bercherj/.local/lib/python3.5/site-packages/ipykernel_launcher.py:5: RuntimeW
"""
In practice, the transfer function can be generated by transforming the poles and zeros of the analog
filter into the poles and zeros of the digital filter. This is simply done using the transformation
1 + p/k
z= .
1 − p/k
It remains a global gain that can be fixed using a value at H(1) (ω = 0). This dramatically simplifies the
synthesis in practice.
Page 150/255
11.3. SYNTHESIS OF IIR FILTERS BY THE BILINEAR TRANSFORMATION METHOD 151
0. Transform the specifications of the digital filter into specifications for a low-pass digital filter.
3. **low-pass $\omega_p$ – band-pass $\omega_1, \omega_2$**:
$$z^{-1} \rightarrow -\,\frac{z^{-2} - \frac{2\alpha\beta}{\beta+1}\,z^{-1} + \frac{\beta-1}{\beta+1}}{\frac{\beta-1}{\beta+1}\,z^{-2} - \frac{2\alpha\beta}{\beta+1}\,z^{-1} + 1}$$
with
$$\alpha = \frac{\cos\!\left(\frac{\omega_2+\omega_1}{2}\right)}{\cos\!\left(\frac{\omega_2-\omega_1}{2}\right)}$$
and
$$\beta = \tan\!\left(\frac{\omega_p}{2}\right)\,\tan\!\left(\frac{\omega_2-\omega_1}{2}\right)$$
Page 151/255
152 CHAPTER 11. DIGITAL FILTERS
Exercise 12. We want to synthesize a band-pass digital filter with edge frequencies 2, 4, 12 and 14 kHz, with
a maximum attenuation of 3dB in the pass-band, and a minimum attenuation of -12dB in the stop-band. The
Nyquist rate (sampling frequency) is Fs = 32 kHz.
• if we choose a Butterworth filter, the best order is n = 2 and the corresponding polynomial is $D(p) = p^2 + \sqrt{2}\,p + 1$, and the transfer function is $H_a(p) = 1/D(p)$. Compute H(z).
$$H(z) = \frac{1 + 2z^{-1} + z^{-2}}{(2+\sqrt{2}) + (2-\sqrt{2})\,z^{-2}}$$
for the digital low-pass, which after the transform $z^{-1} \to -z^{-2}$ gives
$$H(z) = \frac{1 - 2z^{-2} + z^{-4}}{(2+\sqrt{2}) + (2-\sqrt{2})\,z^{-4}}$$
# compute t h e t r a n s f e r f u n c t i o n u s i n g f r e q z
w, H = s i g . f r e q z (
[ 1 , 0 , −2, 0 , 1 ] , [ 2 + s q r t ( 2 ) , 0 , 0 , 0 , 2 − s q r t ( 2 ) ] , whole = T r u e )
# p l o t t h e r e s u l t −−w−p i c o r r e s p o n d s t o a s h i f t o f t h e p u l s a t i o n
# axis a s s o c i a t e d with the f f t s h i f t of the t r a n s f e r f u n c t i o n .
p l t . p l o t ( ( w − p i ) / ( 2 ∗ p i ) ∗ 3 2 , 20 ∗ np . l o g 1 0 ( f f t s h i f t ( np . a b s (H) ) ) )
plt . xlabel ( " Frequencies " )
_ = p l t . y l i m ( [ − 15 , 0 ] )
/home/bercherj/.local/lib/python3.5/site-packages/ipykernel_launcher.py:6: RuntimeW
Page 152/255
11.3. SYNTHESIS OF IIR FILTERS BY THE BILINEAR TRANSFORMATION METHOD 153
analog=False ,
ftype=’ butter ’ ,
o u t p u t = ’ ba ’ )
# compute t h e t t r a n s f e r f u n c t i o n u s i n g f r e q z
w, H = s i g . f r e q z ( b , a , whole = T r u e )
# p l o t t h e r e s u l t −−w−p i c o r r e s p o n d s t o a s h i f t o f t h e p u l s a t i o n
# axis a s s o c i a t e d with the f f t s h i f t of the t r a n s f e r f u n c t i o n .
p l t . p l o t (w − p i , 20 ∗ np . l o g 1 0 ( f f t s h i f t ( np . a b s (H) ) ) )
# plot the f i l t e r template
from p l t _ L P t e m p l a t e i m p o r t ∗
p l t _ L P t e m p l a t e ( [ p i / 3 , p i / 2 ] , [ −3 , −9] , Abounds = [ 1 2 , −35])
p l t . t i t l e ( " R e a l i z e d f i l t e r and i t s t e m p l a t e " )
f i g c a p t i o n ( " R e a l i z e d f i l t e r and i t s t e m p l a t e " )
/home/bercherj/.local/lib/python3.5/site-packages/ipykernel_launcher.py:13: Runtime
del sys.path[0]
b , a = sig . iirdesign (
[ 4 / 1 6 , 12 / 1 6 ] , [ 2 / 1 6 , 14 / 1 6 ] ,
3,
12 ,
analog=False ,
ftype=’ butter ’ ,
o u t p u t = ’ ba ’ )
# compute t h e t t r a n s f e r f u n c t i o n u s i n g f r e q z
w, H = s i g . f r e q z ( b , a , whole = T r u e )
# p l o t t h e r e s u l t −−w−p i c o r r e s p o n d s t o a s h i f t o f t h e p u l s a t i o n
# axis a s s o c i a t e d with the f f t s h i f t of the t r a n s f e r f u n c t i o n .
p l t . p l o t ( ( w − p i ) / ( 2 ∗ p i ) ∗ 3 2 , 20 ∗ np . l o g 1 0 ( f f t s h i f t ( np . a b s (H) ) ) )
_ = p l t . y l i m ( [ − 20 , 0 ] )
/home/bercherj/.local/lib/python3.5/site-packages/ipykernel_launcher.py:12: Runtime
if sys.path[0] == ’’:
Page 153/255
154 CHAPTER 11. DIGITAL FILTERS
Page 154/255
11.4. LAB – BASIC FILTERING 155
The goal of this lab is to study and apply several digital filters to a periodic signal with fun-
damental frequency f0 =200 Hz, sampled at frequency Fs =8000 Hz. This signal is corrupted
by a low drift, and that is a common problem with sensor measurements. A first filter will be
designed in order to remove this drift. In a second step, we will boost a frequency range within
the components of this signal. Finally, we will consider the design of a simple low-pass filter
using the window method, which leads to a linear-phase filter.
This signal is contained into the vector x stored in the file sig1.npz. It is possible to load it
via the instruction f=np.load('sig1.npz')
Fs =8000
Ts = 1 / Fs
%m a t p l o t l i b i n l i n e
# u t i l i t a r y function
d e f f r e q (N, Fs = 1 ) :
""" Returns a vector of s i z e N of normalized f r e q u e n c i e s
b e t w e e n −Fs / 2 and Fs / 2 " " "
r e t u r n np . l i n s p a c e ( − 0 . 5 , 0 . 5 ,N) ∗ Fs
# To l o a d t h e s i g n a l
s i g 1 =np . l o a d ( ’ s i g 1 . npz ’ )
# s i g 1 i s a d i c t i o n n a r y . One c a n l o o k a t t h e k e y s by : s i g 1 . k e y s ( )
m= s i g 1 [ ’m’ ]
x= s i g 1 [ ’ x ’ ]
# Time
plt . figure (1)
plt . plot (x)
p l t . p l o t (m)
p l t . t i t l e ( ’ S i g n a l with slow d r i f t ’ )
p l t . x l a b e l ( " temps " )
Text(0.5, 0, ’temps’)
i m p o r t mpld3
mpld3 . e n a b l e _ n o t e b o o k ( )
Page 155/255
156 CHAPTER 11. DIGITAL FILTERS
%m a t p l o t l i b i n l i n e
# Frequency r e p r e s e n t a t i o n
N= l e n ( x )
f = f r e q (N)
p l t . p l o t ( f , abs ( f f t s h i f t ( f f t ( x ) ) ) )
# p l t . t i t l e ( ’ F o u r i e r t r a n s f o r m o f t h e s i g n a l ( modulus ) ’ )
[<matplotlib.lines.Line2D at 0x7fd29f447828>]
Page 156/255
11.4. LAB – BASIC FILTERING 157
11.4.2 Filtering
We wish now to modify the spectral content of x using different digital filters with transfer
function H(z) = B(z)/A(z). A standard Python function will be particularly useful:
• lfilter implements the associated difference equation. This function computes the output
vector y of the digital filter specified by
• the vector B (containing the coefficients of the numerator B(z),
• and by the vector A of the denominator’s coefficients A(z), for an input vector x:
y=lfilter(B,A,x)
• freqz computes the frequency response H(e j2π f /Fs ) in modulus and phase, for a filter
described by the two vectors B and A: freqz(B,A)
Theoretical part:
What analytical expression enables us to compute the signal's mean over a period?
From that, deduce a filter with impulse response g(n) which computes this mean M(n).
Find another filter, with impulse response h(n), which removes this mean: xc (n) = x(n) − M(n) =
x(n) ∗ h(n). Give the expression of h(n).
Also give the analytical expressions of G(z) and H(z).
Practical part
For the averaging filter and then for the subtracting filter:
• Compute and plt.plot the two impulse responses (you may use the instruction ones(L)
which returns a vector of L ones.
• plt.plot the frequency responses of these two filters. You may use the function fft which
returns the Fourier transform, and plt.plot the modulus abs of the result.
• Filter x by these two filters. plt.plot the output signals, in the time and frequency domain.
Conclude.
# Averaging f i l t e r
#−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−
# F i l t e r g which c o m p u t e s t h e mean on a p e r i o d o f 40 s a m p l e s
L=40
N= l e n ( x )
t =np . a r a n g e (N) / Fs
h= o n e s ( L ) / L
m_estimated= l f i l t e r ( h , [ 1 ] , x )
# ...
p l t . p l o t ( t , m _ e s t i m a t e d , t ,m)
p l t . t i t l e ( ’ S i g n a l and e s t i m a t e d d r i f t ’ )
Page 157/255
158 CHAPTER 11. DIGITAL FILTERS
#
# We c h e c k G( f )
plt . figure ()
H= f f t ( h , 1 0 0 0 )
p l t . p l o t ( f , 3 5 0 ∗ f f t s h i f t ( a b s (H) ) )
p l t . p l o t ( f , f f t s h i f t ( abs ( f f t ( x ) ) ) )
p l t . x l a b e l ( ’ Normalized f e q u e n c i e s ’ )
p l t . t i t l e ( ’ T r a n s f e r Function of the Averaging F i l t e r ’ )
plt . figure ()
p l t . p l o t ( f , abs ( f f t s h i f t ( f f t ( m_estimated ) ) ) )
/usr/local/lib/python3.5/site-packages/scipy/signal/signaltools.py:1344: FutureWarn
out = out_full[ind]
[<matplotlib.lines.Line2D at 0x7fd29d710320>]
# Mean s u b t r a c t i n g f i l t e r
#−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−
# The f i l t e r h s u b t r a c t t h e mean computed o v e r a s l i d i n g window o f 40
samples
# h may be d e f i n e d a s
d= z e r o s ( L ) ; d [ 0 ] = 1
g=d−h
xc = l f i l t e r ( g , [ 1 ] , x )
p l t . p l o t ( t , xc )
Page 158/255
11.4. LAB – BASIC FILTERING 159
Page 159/255
160 CHAPTER 11. DIGITAL FILTERS
p l t . t i t l e ( ’ S i g n a l w i t h removed d r i f t ’ )
# p l t . show ( )
#
plt . figure ()
plt . p l o t ( f , f f t s h i f t ( a b s ( f f t ( xc ) ) ) )
plt . xlabel ( ’ Frequencies ’ )
plt . xlim ([ −0.5 , 0 . 5 ] )
plt . t i t l e ( ’ F o u r i e r t r a n s f o r m o f t h e s i g n a l w i t h removed d r i f t ’ )
#We c h e c k H( f )
plt . figure ()
G= f f t ( g , 1 0 0 0 )
p l t . p l o t ( f , a b s ( f f t s h i f t (G) ) )
p l t . x l a b e l ( ’ Normalized f e q u e n c i e s ’ )
p l t . t i t l e ( ’ Transfer Function of the S u b t r a c t i n g F i l t e r ’ )
/usr/local/lib/python3.5/site-packages/scipy/signal/signaltools.py:1344: FutureWarn
out = out_full[ind]
We wish now to boost a range of frequencies around 1000 Hz on the initial signal.
Page 160/255
11.4. LAB – BASIC FILTERING 161
Page 161/255
162 CHAPTER 11. DIGITAL FILTERS
Practical part
• The vector of denominator’s A(z) coefficients will be computed according to
A=poly([p1,p2]), and you will check that you recover the hand-calculated coeffi-
cients.
• plt.plot the frequency response
• Compute the impulse response, according to # computing the IR d=zeros(300) d[1]=1
h_accentued=lfilter([1],a,d) (output to a Dirac impulse on 300 point). plot it.
• Compute and plot the impulse response obtained using the theoretical formula. Compare
it to the simulation.
• Compute and plot the output of the filter with input xc , both in the time and frequency
domain. Conclude.
# ...
# Compute t h e IR
# ...
# p l t . plot ( h_accentued )
# p l t . t i t l e ( ’ Impulse response of the boost f i l t e r ’)
# in frequency
# ...
# p l t . x l a b e l ( ’ Normalized f r e q u e n c i e s ’)
# p l t . xlim ([ −0.5 , 0 . 5 ] )
# p l t . t i t l e ( ’ T r a n s f e r Function of th e Boost F i l t e r ’)
# Filtering
# sig_accentuated = . . .
# ...
# p l t . x l a b e l ( ’ Time ’ )
# p l t . x l i m ( [ 0 , l e n ( x ) ∗ Ts ] )
# p l t . t i t l e ( ’ S i g n a l w i t h b o o s t e d 1000 Hz ’ )
# I n t h e f r e q u e n c y domain
# ...
# p l t . x l a b e l ( ’ Normalized f r e q u e n c i e s ’)
# p l t . x l i m ([ − Fs / 2 , Fs / 2 ] )
# p l t . t i t l e ( ’ F o u r i e r Transform of Boosted Signal ’)
• How can we simultaneously boost around 1000 Hz and remove the drift? Propose a filter that performs
the two operations.
# both f i l t e r i n g s :
# ...
# p l t . x l a b e l ( ’ Time ’ )
# p l t . x l i m ( [ 0 , l e n ( x ) ∗ Ts ] )
# p l t . t i t l e ( ’ C e n t e r e d S i g n a l w i t h B o o s t e d 1000 Hz ’ )
Page 162/255
11.5. THEORETICAL PART 163
a. We want to limit the number of coefficients to L (FIR). We thus have to clip-off the initial
impulse response. Compute the vector h with L coefficients corresponding to the initial
response, windowed by a rectangular window rectT (t), where T = L ∗ T s.
b. plt.plot the frequency response.
c. Compute and plt.plot the output of this filter subject to the input xc .
d. Observe the group delay of the frequency response:
plt.plot(f,grpdelay(B,A,N)). Comment.
Theoretical Part
Practical part
B=250
Fs =8000
B=B / Fs # Band i n n o r m a l i z e d f e q u e n c i e s
n=np . a r a n g e ( −15 0 , 1 50 )
def sinc ( x ) :
x=np . a r r a y ( x )
z =[ s i n ( n ) / n i f n !=0 e l s e 1 f o r n i n x ]
r e t u r n np . a r r a y ( z )
# ...
# p l t . xlabel ( ’ n ’)
# p l t . t i t l e ( ’ Impulse response ’)
# ...
def grpdelay ( h ) :
N= l e n ( h )
NN=1000
hn=h∗ np . a r a n g e (N)
num= f f t ( hn . f l a t t e n ( ) ,NN)
Page 163/255
164 CHAPTER 11. DIGITAL FILTERS
den = f f t ( h . f l a t t e n ( ) ,NN)
Mden=max ( a b s ( den ) )
# den [ a b s ( den ) <Mden / 1 0 0 ] = 1
Td= r e a l ( num / den )
Td [ a b s ( den ) <Mden / 1 0 ] = 0
r e t u r n num , den , Td
hh= z e r o s ( 2 0 0 )
# hh [ 2 0 : 2 5 ] = a r r a y ( [ 1 , −2, 7 0 , −2, 1 ] )
hh [ 2 4 ] = 1
# p l t . p l o t ( g r p d e l a y ( hh ) )
num , den , Td= g r p d e l a y ( h _ t r o n q )
plt . figure (3)
p l t . p l o t ( Td )
---------------------------------------------------------------------------
<ipython-input-16-2696f2851cc3> in <module>()
14 hh[24]=1
15 #plt.plot(grpdelay(hh))
---> 16 num,den,Td=grpdelay(h_tronq)
17 plt.figure(3)
18 plt.plot(Td)
Page 164/255
Random Signals
12
#Some s p e c i f i c i m p o r t s f o r p l o t t i n g
from p l o t _ r e a i m p o r t ∗
from p l o t _ s i g h i s t o i m p o r t ∗
%m a t p l o t l i b i n l i n e
165
166 CHAPTER 12. RANDOM SIGNALS
and, choosing $\tau = n_k$,
$$p_{X(n_1),X(n_2),\ldots,X(n_k)} = p_{X(n_1-n_k),X(n_2-n_k),\ldots,X(0)}.$$
Therefore, the joint distribution only depends on k − 1 parameters, instead of the k initial parameters.
As a consequence, we have that
• E [X(n)X(n − τ )∗ ] = RX (τ ) only depends on the delay between the two instants. In such a case, the
resulting function RX (τ ) is called a correlation function.
12.2.2 Ergodism
Definition
$$\langle X(n,\omega) \rangle = \lim_{N\to+\infty} \frac{1}{N} \sum_{[N]} X(n,\omega).$$
Of course, in the general case, this time average is a random variable, since it depends on $\omega$.
Definition A random signal is said to be ergodic if its time averages are deterministic, i.e. non-random,
variables.
Important consequence
A really important consequence is that if a signal is both stationary and ergodic, then the statistical
means and the time averages are equal.
E [•] = ⟨•⟩
• (moments) Check that if the signal is both stationary and ergodic, then
$$\mathbb{E}\left[X(n,\omega)^k\right] = \lim_{N\to+\infty} \frac{1}{N} \sum_{[N]} X(n,\omega)^k,$$
$$R_X(\tau) = \mathbb{E}\left[X(n,\omega)X(n-\tau,\omega)\right] = \lim_{N\to+\infty} \frac{1}{N} \sum_{[N]} X(n,\omega)X(n-\tau,\omega).$$
Page 166/255
12.2. FUNDAMENTAL PROPERTIES 167
Experiment with the parameters (amplitude, number of samples). Is the signal stationary, er-
godic, etc?
import scipy . s t a t s as s t a t s
M = 10
# number o f b i n s i n h i s t o g r a m s
N = 1500 # Number o f s a m p l e s p e r r e a l i z a t i o n
K = 200 # T o t a l number o f r e a l i z a t i o n s
XGauss = s t a t s . norm ( l o c =0 , s c a l e =1 )
# S i n e wave p l u s n o i s e
X = 3 ∗ XGauss . r v s ( s i z e = (K, N) ) + 3 ∗ np . o u t e r (
np . o n e s ( ( K, 1 ) ) , np . s i n ( 2 ∗ np . p i ∗ np . a r a n g e (N) / N) )
p r i n t ( " S t a n d a r d d e v i a t i o n o f t i m e a v e r a g e s : " , np . s t d ( np . mean (X, a x i s = 1 ) ) )
# pylab . rcParams [ ’ f i g u r e . f i g s i z e ’] = ( 1 0 . 0 , 8 . 0 )
p l t . rcParams [ ’ f i g u r e . f i g s i z e ’ ] = (8 , 5)
p l o t _ r e a (X, nb =10 , f i g = 1 )
By varying the number of samples N, we see that the time average converges to zero, for each realization.
Thus we could say that this process is ergodic. However, the ensemble average converges to the sine wave
and is dependent on time: the process is not stationary.
XGauss = s t a t s . norm ( l o c =0 , s c a l e =1 )
# pylab . rcParams [ ’ f i g u r e . f i g s i z e ’] = ( 1 0 . 0 , 8 . 0 )
p l t . rcParams [ ’ f i g u r e . f i g s i z e ’ ] = (8 , 5)
d e f q 1 _ e x p e r i m e n t (N) :
K = 200
# S i n e wave p l u s n o i s e
X = 3 ∗ XGauss . r v s ( s i z e = (K, N) ) + 3 ∗ np . o u t e r (
np . o n e s ( ( K, 1 ) ) , np . s i n ( 2 ∗ np . p i ∗ np . a r a n g e (N) / N) )
p r i n t ( " S t a n d a r d d e v i a t i o n o f t i m e a v e r a g e s : " , np . s t d ( np . mean (X, a x i s = 1 )
))
p l o t _ r e a (X, nb =10 , f i g = 1 )
_ = i n t e r a c t ( q 1 _ e x p e r i m e n t , N= ( 0 , 2 0 0 0 , 1 0 ) )
Page 167/255
168 CHAPTER 12. RANDOM SIGNALS
Page 168/255
12.2. FUNDAMENTAL PROPERTIES 169
2- Consider now a sine wave with a random phase X(n, ω ) = A sin(2π f0 n + ϕ (ω )).
Experiment with the parameters (amplitude, number of samples). Is the signal stationary, ergodic, etc?
Also change the value of the frequency, and replace function sin by square which generates a pulse train
instead of a sine wave.
from p y l a b i m p o r t ∗
K = 100
N = 1000
fo = 2.2 / N
S = z e r o s ( ( K, N) )
f o r r i n r a n g e (K) :
S [ r , : ] = 1 . 1 ∗ s i n ( 2 ∗ p i ∗ f o ∗ a r a n g e (N) + 2 ∗ p i ∗ r a n d ( 1 , 1 ) )
p l o t _ r e a ( S , f i g =2)
This example shows that a random signal is not necessarily noisy and irregular. Here we have a random
signal which is ‘smooth’. The random character is introduced by the random phase, which simply reflects
that we do not know the time origin of this sine wave.
Here, we see that both the time average and the ensemble average converge to zero. Therefore, we can
conclude that this signal is stationary and ergodic.
Let us now define a square wave:
def square ( x ) :
""" square ( x ) : \ n
R e t u r n s a p u l s e t r a i n w i t h p e r i o d : math : ‘ 2 \ p i ‘
"""
return sign ( sin (x) )
Page 169/255
170 CHAPTER 12. RANDOM SIGNALS
Again, we see that both means tend to zero, a constant, which means that the signal is stationary (its
ensemble average does not depend of time) and ergodic (its time average does not depend on the actual
realization).
3- Compute and analyze the histograms of two white noises, respectively with a uniform and
a Gaussian probability density function, using the lines in script q1c. Do this for several
realizations (launch the program again and again) and change the number of points and of bins.
Compare the two signals. What do you think of the relation between whiteness and gaussianity.
Page 170/255
12.2. FUNDAMENTAL PROPERTIES 171
(0, 0.438836508441576)
Uniform distribution: Value of the mean : 0.500 and of the variance 0.083
Gauss distribution: Value of the mean : 0.000 and of the variance 1.000
[<matplotlib.lines.Line2D at 0x7f0a836bc518>]
from p l o t _ s i g h i s t o i m p o r t ∗
p l o t _ s i g h i s t o ( x _ u n i . r v s ( s i z e =N) , f i g =1 )
p l o t _ s i g h i s t o ( x _ g a u s s . r v s ( s i z e =N) , f i g =2 )
We see that the Gaussian noise is more concentrated on its mean 0, and exhibits more important values,
while the uniform noise is confined into the interval [0,1].
Concerning the question on the relation between whiteness and Gaussianity, actually, there is no relation
between these two concepts. A white noise can be distributed according to any distribution, and a Gaussian
sequence is not necessarily iid (white).
Page 171/255
172 CHAPTER 12. RANDOM SIGNALS
Page 172/255
12.3. SECOND ORDER ANALYSIS 173
$$R_{XY}(k) \triangleq \mathbb{E}\left[X(n,\omega)Y^*(n-k,\omega)\right] \underset{\text{erg}}{=} \lim_{N\to+\infty} \frac{1}{N} \sum_{n=0}^{N} X(n,\omega)Y^*(n-k,\omega),$$
$$R_{XX}(k) \triangleq \mathbb{E}\left[X(n,\omega)X^*(n-k,\omega)\right] \underset{\text{erg}}{=} \lim_{N\to+\infty} \frac{1}{N} \sum_{n=0}^{N} X(n,\omega)X^*(n-k,\omega).$$
Main properties
1. (Hermitian symmetry)
2. (Symmetry for the autocorrelation). In the case of the autocorrelation function, the hermitian symme-
try reduces to
RXX (τ ) = R⋆XX (−τ ).
Page 173/255
174 CHAPTER 12. RANDOM SIGNALS
This shows that RXX (0) is nothing but the power of the signal under study. Observe that necessarily
RXX (0) > 0.
and using the scalar product $\langle x_1, x_2 \rangle = \mathbb{E}\left[X_1(n)X_2^*(n)\right]$, we get
proof: develop $\mathbb{E}\left[\left|\sum_i \lambda_i X(\tau_i)\right|^2\right] \geq 0$
(Correlation coefficient). By the maximum property, the correlation coefficient
$$\rho_{XY}(\tau) = \frac{R_{YX}(\tau)}{\sqrt{R_{XX}(0)R_{YY}(0)}}$$
Exercises
1. Developing $\mathbb{E}\left[|X + \lambda Y|^2\right]$ into a polynomial in $\lambda$ and observing that this polynomial is always nonnegative,
prove the Schwarz inequality.
2. Consider a random signal U(n, ω ) defined on the interval [0, N]. Define the periodic signal
3. Consider a random signal X(n, ω ) with autocorrelation RXX (k) and define
Page 174/255
12.3. SECOND ORDER ANALYSIS 175
$$R_{XX}(k) \underset{\text{erg}}{=} \lim_{N\to+\infty} \frac{1}{N} \sum_{n=0}^{N} X(n,\omega)X^*(n-k,\omega).$$
Given a finite number of points N, with data known from n = 0..N − 1, it is thus possible to approximate the
correlation function by a formula like
1 N−1
RXX (k) = ∑ X(n, ω )X ∗ (n − k, ω ).
N n=0
If we take k ≥ 0, we see that X ∗ (n − k, ω ) is unavailable for k > n. Consequently, the sum must go from n = k
to N − 1. At this point, people define two possible estimators. The first one is said “unbiased” while the
second is “biased” (check this by computing the expectation E [•] of the two estimators).
$$\hat{R}_{XX}^{\text{(unbiased)}}(k) = \frac{1}{N-k} \sum_{n=k}^{N-1} X(n,\omega)X^*(n-k,\omega) \tag{12.1}$$
$$\hat{R}_{XX}^{\text{(biased)}}(k) = \frac{1}{N} \sum_{n=k}^{N-1} X(n,\omega)X^*(n-k,\omega). \tag{12.2}$$
For the biased estimator, it can be shown (Bartlett) that the variance has the form
$$\mathrm{Var}\left[\hat{R}_{XX}^{\text{(biased)}}(k)\right] \approx \frac{1}{N} \sum_{m=-\infty}^{+\infty} \rho(m)^2 + \rho(m+k)\rho(m-k) - 4\rho(m)\rho(k)\rho(m-k) + 2\rho(m)^2\rho(k)^2,$$
that is, essentially a constant over N. As far the unbiased estimator is concerned, we will have a factor
N/(N −k), and we see that this time the variance increases with k. Thus, though it is unbiased, this estimator
has a very bad behaviour with respect to the variance.
This is checked below. First we generate a gaussian white noise, compute the two estimates of the
correlation function and compare them.
from c o r r e l a t i o n i m p o r t x c o r r
from s c i p y i m p o r t s t a t s a s s t a t s
N = 100
XGauss = s t a t s . norm ( l o c =0 , s c a l e =1 )
S = XGauss . r v s ( s i z e =N)
#
R b i a s e d , l a g s = x c o r r ( S , norm= ’ b i a s e d ’ )
R u n b i a s e d , l a g s = x c o r r ( S , norm= ’ u n b i a s e d ’ )
Rtheo = z e r o s ( s i z e ( Rbiased ) )
R t h e o [ l a g s == 0 ] = 1
Rt = o n e s ( 1 )
f i g , ax = s u b p l o t s ( 3 , 1 , f i g s i z e = ( 7 , 7 ) , s h a r e x = True , s h a r e y = T r u e )
# biased correlation
ax [ 1 ] . p l o t ( l a g s , R b i a s e d )
# ax [ 0 ] . a x v l i n e ( 0 , ymin =0 , ymax =1 , c o l o r = ’ r ’ , lw =3 )
ax [ 1 ] . s e t _ t i t l e ( " B i a s e d C o r r e l a t i o n f u n c t i o n " )
ax [ 1 ] . s e t _ x l a b e l ( " D e l a y " )
ax [ 1 ] . a x i s ( ’ t i g h t ’ ) # T i g h t l a y o u t o f t h e a x i s
# unbiased c o r r e l a t i o n
ax [ 2 ] . p l o t ( l a g s , R u n b i a s e d )
Page 175/255
176 CHAPTER 12. RANDOM SIGNALS
ax [ 2 ] . s e t _ t i t l e ( " U n b i a s e d C o r r e l a t i o n f u n c t i o n " )
ax [ 2 ] . s e t _ x l a b e l ( " D e l a y " )
# theoretical correlation
ax [ 0 ] . s t e m ( [ 0 ] , [ 1 ] , l i n e f m t = ’ r−’ , m a r k e r f m t = ’ r o ’ , b a s e f m t = ’ r−’ )
ax [ 0 ] . p l o t ( [ l a g s [ 0 ] , l a g s [ − 1 ] ] , [ 0 , 0 ] , ’ r ’ )
ax [ 0 ] . s e t _ t i t l e ( " T r u e C o r r e l a t i o n f u n c t i o n " )
fig . tight_layout ()
ax [ 1 ] . a x i s ( ’ t i g h t ’ )
ax [ 0 ] . s e t _ y l i m ( [ − 0 . 5 , 1 . 2 ] )
(-0.5, 1.2)
N = 1000
f0 = 0.05
t = np . l i n s p a c e ( 0 , 4 0 0 , N)
Page 176/255
12.3. SECOND ORDER ANALYSIS 177
x = 1 ∗ square (2 ∗ pi ∗ f0 ∗ t )
n o i s e = s t a t s . norm ( l o c =0 , s c a l e = 2 ) . r v s (N)
observation = x + noise
#
Plot the correlation of the noisy signal. Are you able to retrieve the unknown periodicities? Experiment with
the parameters. Conclusion.
p l t . p l o t ( t , x , ’− ’ )
p l t . plot ( t , observation , alpha =0.7)
#
R b i a s e d , l a g s = x c o r r ( o b s e r v a t i o n , norm= ’ b i a s e d ’ , m a x l a g s =5 00 )
plt . figure ()
p l t . p l o t ( lags , Rbiased )
p l t . g r i d ( b= T r u e )
Page 177/255
178 CHAPTER 12. RANDOM SIGNALS
The last figure shows the correlation of the noisy periodic signal. This correlation is simply the super-
position of the correlation of the noise and of the correlation of the signal (Check it!)
Since the correlation of the noise (a Dirac impulse) is concentrated at zero, we can read - the period of the
signal: 50 (that is a relative frequency of 50/1000=0.05) - the power of the signal: 0.5 - the power of the
noise: 4.5 - 0.5 = 4 (was generated with a standard deviation of 2). The correlation function thus enables us
to grasp much information that was not apparent in the time series!
from s c i p y . s i g n a l i m p o r t l f i l t e r
from s c i p y . f f t p a c k i m p o r t f f t , i f f t
%m a t p l o t l i b i n l i n e
12.4 Filtering
12.4.1 General relations for cross-correlations
We consider a situation where we want to study the correlations between the different inputs and outputs of
a pair of two filters:
{
Y1 (n, ω ) = (X1 ∗ h1 )(n, ω ),
Y2 (n, ω ) = (X2 ∗ h2 )(n, ω ),
Let us compute the intercorrelation between Y1 (n) and Y2 (n) :
RY1Y2 (m) = E [Y1 (n, ω )Y2∗ (n − m, ω ))] = E [(X1 ∗ h1 )(n, ω ))(X2∗ ∗ h∗2 )(n − m, ω ))] .
The two convolution products are
and
$$R_{Y_1Y_2}(m) = \mathbb{E}\left[\sum_u X_1(n-u,\omega)h_1(u) \sum_v X_2^*(n-m-v,\omega)h_2^*(v)\right]$$
$$= \mathbb{E}\left[\sum_u \sum_v X_1(n-u)h_1(u)X_2^*(n-m-v)h_2^*(v)\right]$$
$$= \sum_u \sum_v h_1(u)\,R_{X_1X_2}(m+v-u)\,h_2^*(v).$$
Page 178/255
12.4. FILTERING 179
Looking at the sum over u, we recognize a convolution product between h1 and RX1 X2 , expressed at time
(m + v) :
where we have noted $h_2^{(-)}(v) = h_2(-v)$. In this last relation, we recognize another convolution product, this
time between $(h_1 * R_{X_1X_2})$ and $h_2^{*(-)}$:
$$R_{Y_1Y_2}(m) = \sum_v (h_1 * R_{X_1X_2})(m+v)\,h_2^{*(-)}(-v)$$
$$= \sum_{v'} (h_1 * R_{X_1X_2})(m-v')\,h_2^{*(-)}(v')$$
$$= \left(h_1 * R_{X_1X_2} * h_2^{*(-)}\right)(m).$$
12.4.2 By-products
• [Autocorrelation of the output of a filter] With a single filter we can apply the previous formula,
with {
X1 = X2 = X,
h1 = h2 = h.
Of course Y1 = Y2 = Y , and
$$R_{YY}(m) = \left(h * R_{XX} * h^{*(-)}\right)(m).$$
• [Cross correlation between output and input] We want to measure the correlation between the input
and output of a filter. Toward this goal, we consider
$$\begin{cases} X_1 = X_2 = X, \\ Y_1 = Y, \quad h_1 = h, \\ Y_2 = X, \quad h_2 = \delta. \end{cases}$$
12.4.3 Examples
We study now the filtering of random signals. We begin with the classical impulse response
h(n) = an , with x(n) a uniform distributed white noise at the input, and we denote y(n) the
output.
Page 179/255
180 CHAPTER 12. RANDOM SIGNALS
1. Filter the signal x(n), with the help of the function lfilter. Compare the input and
output signals, and in particular their variations. Compare the histograms. Look at the
Fourier transform of the output. Do this for several values of a, beginning with a = 0.9.
2. Using the function xcorr (import it via from correlation import xcorr),
compute all the possible correlations between the input and the output. What would be the
correlation matrix associated with the signal x(n) = [x(n) y(n)]t ? Compare the impulse
response h to the cross-correlation Ryx(k). Explain. Experiment by varying the number
of samples N and a (including its sign).
3. Consider the identification of the impulse response by cross-correlation, as above, but in
the noisy case. Add a Gaussian noise to the output and compute the cross-correlation.
Observe, comment and experiment with the parameters.
The filtering is done thanks to the function lfilter. We have first to import it, eg as
from scipy.signal import lfilter
We will also need to play with ffts so it is a good time to import it from fftpack
from scipy.fft import fft, ifft
N = 1000 #Number o f s a m p l e s
x = s t a t s . u n i f o r m ( − 0 . 5 , 1 ) . r v s (N)
a = 0.9
# F i l t e r i n g and p l o t s . . .
# FILL IN . . .
y = l f i l t e r ( [ 1 ] , [ 1 , −a ] , x )
f i g u r e ( f i g s i z e =(8 , 3) )
plot (x)
x l a b e l ( " Time " )
t i t l e ( " I n i t i a l signal " )
f i g u r e ( f i g s i z e =(8 , 3) )
plot (y)
x l a b e l ( " Time " )
t i t l e ( " Filtered signal " )
We see that the output has slower variations than the input. This is the result of the filtering operation.
Let us now look at the histograms:
# Histograms
# FILL IN
# Histograms
f i g u r e ( f i g s i z e =(8 , 3) )
p l t . h i s t ( x , b i n s =20 , r w i d t h = 0 . 9 5 )
p l t . x l a b e l ( " Amplitude " )
plt . t i t l e ( " I n i t i a l signal " )
f i g u r e ( f i g s i z e =(8 , 3) )
p l t . h i s t ( y , b i n s =20 , r w i d t h = 0 . 9 5 )
p l t . x l a b e l ( " Amplitude " )
plt . t i t l e ( " Filtered signal " )
Page 180/255
12.4. FILTERING 181
Page 181/255
182 CHAPTER 12. RANDOM SIGNALS
While the initial signal is uniformly distributed, the histogram of the output looks like the histogram
of a gaussian. Actually, this is related to the central limit theorem: the mixture of iid variables tends to a
gaussian. This also explains the modification of the amplitudes observed on the time signal.
Let us finally look at the Fourier transform:
# FILL IN
f = a r a n g e (N) / N − 0 . 5
f i g , ax = s u b p l o t s ( 2 , 1 , f i g s i z e = ( 7 , 5 ) )
ax [ 0 ] . p l o t ( f , a b s ( f f t s h i f t ( f f t ( x ) ) ) )
ax [ 0 ] . s e t _ t i t l e ( " F o u r i e r t r a n s f o r m o f t h e i n p u t " )
ax [ 0 ] . s e t _ x l a b e l ( " F r e q u e n c y " )
ax [ 0 ] . a x i s ( ’ t i g h t ’ ) # T i g h t l a y o u t o f t h e a x i s
ax [ 1 ] . p l o t ( f , a b s ( f f t s h i f t ( f f t ( y ) ) ) )
ax [ 1 ] . s e t _ t i t l e ( " F o u r i e r t r a n s f o r m o f t h e o u t p u t " )
ax [ 1 ] . s e t _ x l a b e l ( " F r e q u e n c y " )
fig . tight_layout ()
ax [ 1 ] . a x i s ( ’ t i g h t ’ )
Page 182/255
12.4. FILTERING 183
legend ( )
axis ( ’ tight ’ )
_ = x l a b e l ( " Frequency " )
from c o r r e l a t i o n i m p o r t x c o r r
N = 1000 #Number o f s a m p l e s
Page 183/255
184 CHAPTER 12. RANDOM SIGNALS
x = s t a t s . u n i f o r m ( − 0 . 5 , 1 ) . r v s (N)
a = 0.8
y = l f i l t e r ( [ 1 ] , [ 1 , −a ] , x )
L = 30
Rxx , l a g s = x c o r r ( x , x , m a x l a g s =L )
Rxy , l a g s = x c o r r ( x , y , m a x l a g s =L )
Ryx , l a g s = x c o r r ( y , x , m a x l a g s =L )
Ryy , l a g s = x c o r r ( y , y , m a x l a g s =L )
f i g , ax = s u b p l o t s ( 2 , 2 , f i g s i z e = ( 7 , 5 ) )
a x f = ax . f l a t t e n ( )
R t i t l e s = ( ’ Rxx ’ , ’ Rxy ’ , ’ Ryx ’ , ’ Ryy ’ )
f o r k , z i n e n u m e r a t e ( ( Rxx , Rxy , Ryx , Ryy ) ) :
axf [ k ] . plot ( lags , z )
axf [ k ] . s e t _ t i t l e ( R t i t l e s [ k ] )
fig . tight_layout ()
We have represented above all the possible correlations between the input and the output. This represen-
tation corresponds to the correlation matrix of the vector z(n) = [x(n) y(n)]H that would give
$$\mathbb{E}\left\{\begin{bmatrix} x(n) \\ y(n) \end{bmatrix} \begin{bmatrix} x^*(n-k) & y^*(n-k) \end{bmatrix}\right\} = \begin{bmatrix} R_{xx}(k) & R_{xy}(k) \\ R_{yx}(k) & R_{yy}(k) \end{bmatrix}$$
Page 184/255
12.4. FILTERING 185
Uniform distribution: Value of the mean : 0.500 and of the variance 0.083
In the noisy case, the same kind of observations hold. Indeed, if z is a corrupted version of y, with
z(n) = y(n) + w(n), then
Rzx (k) = Ryx (k) + Rwx (k) = Ryx (k)
provided that x and w are uncorrelated, which is a reasonable assumption.
N = 1000
#Remember t h a t t h e v a r i a n c e o f $x$ i s g i v e n by
x_uni = s t a t s . uniform ( −0.5 , 1)
Page 185/255
186 CHAPTER 12. RANDOM SIGNALS
Uniform distribution: Value of the mean : 0.000 and of the variance 0.083
N = 1000 #Number o f s a m p l e s
x = s t a t s . u n i f o r m ( − 0 . 5 , 1 ) . r v s (N) # g e n e r a t e s N v a l u e s f o r x
a = 0.8
y = l f i l t e r ( [ 1 ] , [ 1 , −a ] , x ) # Computes t h e o u t p u t o f t h e s y s t e m
w = s t a t s . norm ( 0 , 1 ) . r v s (N) # G a u s s i a n n o i s e
y = y + 0.5 ∗ w
L = 50
Ryx , l a g s = x c o r r ( y , x , m a x l a g s =L ) # t h e n t h e c r o s s −c o r r e l a t i o n
d = z e r o s (N)
d [0] = 1
h = l f i l t e r ( [ 1 ] , [ 1 , −a ] , d ) # and t h e i m p u l s e r e s p o n s e
p l o t ( a r a n g e ( L ) , Ryx [ a r a n g e ( L , L + L ) ] , l a b e l = " I n t e r c o r r e l a t i o n $R_{ yx } ( k ) $ " )
p l o t ( a r a n g e ( L ) , v ∗ h [ a r a n g e ( L ) ] , l a b e l = " I m p u l s e r e s p o n s e $h ( k ) $ " )
x l a b e l ( " Lags $k$ " )
g r i d ( True )
legend ( )
Page 186/255
12.5. ANALYSE DANS LE DOMAINE FRÉQUENTIEL 187
Hence, we see that identification of a system is possible by cross-correlation, even when dealing with
noisy outputs. Such identification would be impossible by direct measurement of the IR, because of the
presence of noise.
Conséquences :
Page 187/255
188 CHAPTER 12. RANDOM SIGNALS
SY1Y2 ( f ) = 0.
On en déduit que
RY1Y2 (τ ) = TF−1 [SY1Y2 ( f )] = TF−1 [0] = 0.
si les filtres sont disjoints en fréquence, l’intercorrélation des sorties est nulle.
Application Considérons deux filtres parfaits autour de deux fréquences pures f1 et f2 , de même entrée
X(n, ω ). On a Y1 (n, ω ) = X( f1 , ω ) exp (− j2π f1 n), et Y2 (n, ω ) = X( f2 , ω ) exp (− j2π f2 n), avec toutes les
précautions d’usage sur la << non existence >> de la transformée de Fourier considérée pour des signaux
aléatoires stationnaires. Dans ces conditions,
soit
E [X( f1 , ω )X ∗ ( f2 , ω )] = 0.
On dit que les composantes spectrales sont décorrélées.
Si on appelle DXX ( f ) la densité spectrale de puissance d’un signal aléatoire X(n, ω ), alors la puissance du
signal portée par les composantes fréquentielles comprises entre f1 et f2 s’écrit
∫ f2
PXX ( f ∈ [ f1 , f2 ]) = DXX ( f )d f .
f1
[ ] 1 +N
PXX = E |X(n, ω )|2 = RXX (0) = lim
N→+∞ 2N
∑ |X(n, ω )|2 .
−N
Par ailleurs,
∫ +1
2
RXX (τ ) = SXX ( f ) exp ( j2π f τ ) d f ,
− 12
soit, pour τ = 0,
∫ +1
2
RXX (0) = PXX = SXX ( f ) d f .
− 12
Page 188/255
12.5. ANALYSE DANS LE DOMAINE FRÉQUENTIEL 189
La transformée de Fourier SXX ( f ) de la fonction d’autocorrélation est ainsi une bonne candidate pour être
la densité spectrale de puissance. Notons cependant que cette dernière relation ne prouve pas qu’elle le soit.
Considérons un filtre parfait, dont le module de la fonction de transfert est d’amplitude un dans une
bande ∆ f centrée sur une fréquence f0 , et nul ailleurs :
{
|H( f )| = 1 pour f ∈ [ f0 − ∆2f , f0 + ∆2f ]
|H( f )| = 0 ailleurs.
Notons Y (n, ω ) = (h ∗ X)(n, ω ) la réponse de ce filtre à une entrée X(n, ω ). La puissance de la sortie est
donnée par
∫ +1
2
PYY = RYY (0) = SYY ( f ) d f ,
− 12
ce qui correspond bien à la définition de la densité spectrale de puissance : la puissance pour les composantes
spectrales comprises dans un intervalle est bien égale à l’intégrale de la densité spectrale de puissance sur
cet intervalle. Si ∆ f est suffisamment faible, on pourra considérer la densité spectrale de puissance SXX ( f )
comme approximativement constante sur l’intervalle, et
∆f ∆f
PYY ( f ∈ [ f0 − , f0 + ]) ≃ SXX ( f0 )∆ f .
2 2
Cette dernière relation indique que la densité spectrale de puissance doit s’exprimer en Watts par Hertz. Par
ailleurs, lorsque ∆ f tend vers 0, la puissance recueillie est de plus en plus faible. Pour ∆ f = 0, la puissance
obtenue est ainsi normalement nulle, sauf si la densité spectrale elle-même est constituée par une << masse
>> de Dirac (de largeur nulle mais d’amplitude infinie) à la fréquence considérée.
Notons que le filtre que nous avons défini ci-dessus n’est défini, par commodité de présentation, que pour
les fréquences positives. Sa fonction de transfert ne vérifie donc pas la propriété de symétrie hermitienne
des signaux réels : la réponse impulsionnelle associée est donc complexe et la sortie Y (t, ω ) également
complexe. En restaurant cette symétrie, c’est-à-dire en imposant H( f ) = H ∗ (− f ), ce qui entraîne (notez le
module de f ) {
|H( f )| = 1 pour | f | ∈ [ f0 − ∆2f , f0 + ∆2f ]
|H( f )| = 0 ailleurs,
la puissance en sortie est
∫ − f0 + ∆ f ∫ f0 + ∆ f
2 2
PYY = SXX ( f ) d f + SXX ( f ) d f .
− f0 − ∆2f f0 − ∆2f
La densité spectrale de puissance d’un signal aléatoire réel est une fonction paire, ce qui conduit enfin à
∫ f0 + ∆ f
2
PYY = 2 SXX ( f ) d f ,
f0 − ∆2f
Page 189/255
190 CHAPTER 12. RANDOM SIGNALS
relation qui indique que la puissance se partage équitablement dans les fréquences positives et négatives.
Exemple :
où A(ω ) est une variable aléatoire centrée de variance σ 2 et ϕ (ω ) uniformément répartie sur [0, 2π ]. La
fonction d’autocorrélation de ce signal vaut
σ2
RXX (τ ) = cos(2π f0 τ ).
2
Par transformée de Fourier, on obtient la densité spectrale :
σ2
SXX ( f ) = [δ ( f + f0 ) + δ ( f − f0 )].
4
Enfin, en intégrant la densité spectrale
∫
σ2 σ2
[δ ( f + f0 ) + δ ( f − f0 )]d f = ,
4 2
on retrouve la puissance de la sinusoïde, σ 2 /2, comme il se doit.
Les fonctions de corrélation et les densités spectrales de puissance forment des paires de transformées
de Fourier :
SXX ( f ) ⇌ RXX (τ ),
SXY ( f ) ⇌ RXY (τ ),
où SXX ( f ), SXY ( f ) sont les densités spectrale de puissance et de puissance d’interaction, respectivement.
Ces relations constituent le théorème de Wiener-Kintchine-Einstein.
N = 2000
a = −0.8
x = s t a t s . norm ( 0 , 1 ) . r v s ( ( N) )
y = l f i l t e r ([1] , [1 , a ] , x)
Yf = f f t ( y )
Py = 1 / N ∗ a b s ( Yf ) ∗∗2
f = f f t f r e q (N)
f = np . l i n s p a c e ( − 0 . 5 , 0 . 5 , N)
Sy = a b s ( 1 / a b s ( f f t ( [ 1 , a ] , N) ) ∗ ∗ 2 )
p l t . p l o t ( f , f f t s h i f t ( Py ) , a l p h a = 0 . 6 5 , l a b e l = " P e r i o d o g r a m " )
p l t . p l o t ( f , f f t s h i f t ( Sy ) , c o l o r = " y e l l o w " , lw =2 , l a b e l = " T r u e s p e c t r u m " )
p l t . legend ( )
#
# Smoothing
#
Ry = i f f t ( Py )
hh = s i g . hamming ( 2 0 0 , sym= T r u e )
Page 190/255
12.5. ANALYSE DANS LE DOMAINE FRÉQUENTIEL 191
z = np . z e r o s (N)
L = 100
h = f f t s h i f t ( s i g . windows . hann ( L , sym= T r u e ) )
z [ 0 : round (L / 2) ] = h [ 0 : round (L / 2) ]
z [−1:− r o u n d ( L / 2 ) − 1: −1] = h[−1:− r o u n d ( L / 2 ) − 1: −1]
Py_smoothed = a b s ( f f t ( z ∗ Ry ) )
p l t . p l o t ( f , f f t s h i f t ( Py ) , a l p h a = 0 . 6 , l a b e l = " P e r i o d o g r a m " )
p l t . p l o t ( f , f f t s h i f t ( Sy ) , lw =2 , c o l o r = " y e l l o w " , l a b e l = " T r u e s p e c t r u m " )
plt . plot (
f,
f f t s h i f t ( Py_smoothed ) ,
alpha =0.7 ,
c o l o r =" l i g h t g r e e n " ,
lw =2 ,
l a b e l = " Smoothed \ n P e r i o d o g r a m " )
_ = p l t . legend ( )
f i g c a p t i o n ( " Smoothed P e r i o d o g r a m " )
# Averaging
def averaged_perio(y, M):
    """Averaged periodogram (Bartlett's method).

    The signal is split into M non-overlapping segments of length
    L = round(N / M); the periodogram of each segment (zero-padded to
    N points so all estimates share the same frequency grid) is
    computed, and the M periodograms are averaged, which reduces the
    variance of the spectral estimate.

    Parameters
    ----------
    y : array_like
        Input signal of length N.
    M : int
        Number of segments to average.

    Returns
    -------
    ndarray
        Averaged periodogram evaluated on N frequency bins.
    """
    N = np.size(y)
    L = int(np.round(N / M))
    Py_averaged = np.zeros(N)
    for m in range(M):
        # Periodogram of the m-th segment, normalized by the segment length L
        Py_averaged += 1 / L * (abs(fft(y[m * L:(m + 1) * L], N)) ** 2)
    return Py_averaged / M
p l t . p l o t ( f , f f t s h i f t ( Py ) , a l p h a = 0 . 6 , l a b e l = " P e r i o d o g r a m " )
p l t . p l o t ( f , f f t s h i f t ( Sy ) , lw =2 , c o l o r = " y e l l o w " , l a b e l = " T r u e s p e c t r u m " )
plt . plot (
f,
f f t s h i f t ( Py_averaged ) ,
alpha =0.7 ,
Page 191/255
192 CHAPTER 12. RANDOM SIGNALS
c o l o r =" l i g h t g r e e n " ,
lw =2 ,
l a b e l =" Averaged \ nPeriodogram " )
_ = p l t . legend ( )
f i g c a p t i o n ( " Averaged Periodogram " )
12.6 Applications
12.6.1 Matched filter
We consider a problem frequently encountered in practice, in applications as echography, seismic reflexion,
sonar or radar. The problem at hand is as follows: we look for a known waveform s(n), up to a delay n0 in a
mixture
y(n) = As(n − n0 ) + v(n),
where A and n0 are unknowns and v(n) is an additive noise. The problem is to find the delay n0 , which
typically corresponds to a time-to-target. In order to do that, suppose that we filter the mixture by a filter
with impulse response h. The output has the form
with x(n) = A[h ∗ s](n − n0 ) and w(n) = [h ∗ v](n), respectively the outputs of the signal and noise part.
Clearly, if v(n) is stationary, so is w(n). Therefore, the idea is to design h so that the signal output is as
large as possible compared to the noise output, at time n0 . In statistical terms, we put this as choosing the filter
such that the ratio of the signal output’s power to the noise output’s power is maximum. Hence, our goal is to
design a filter which maximizes the signal-to-noise ratio at time n0 . We suppose that the desired signal is
deterministic and thus consider its instantaneous power |x(n0 )|2 .
The signal-to-noise ratio at time n0 is
|x(n0 )|2
SNR(n0 ) = .
E [|w(n)|2 ]
Page 192/255
12.6. APPLICATIONS 193
Of course, both the numerator and the denominator depend on the filter. Let us first consider the numerator.
We have
In order to maximize the signal-to-noise ratio we invoke the Cauchy-Schwarz inequality. Recall that
this inequality states that, given two integrable functions f and g and a positive measure w, then
∫ 2 ∫ ∫
f (x)g(x)∗ w(x)dx ≤ | f (x)|2 w(x)dx |g(x)|2 w(x)dx
with equality if and only if f (x) = kg(x) for some arbitrary real constant k.
Page 193/255
194 CHAPTER 12. RANDOM SIGNALS
The idea is to apply this inequality in order to simplify the SNR(n0 ). For that, let us express the numer-
ator as ∫ ∫ √ S( f )
H( f )S( f )d f = H( f ) SVV ( f ) √ df.
SVV ( f )
By the Cauchy-Schwarz inequality, we then get that
∫ 2 ∫ 2
∫
H( f )S( f )d f ≤ |H( f )| SVV ( f )d f √
2 S( f )
df
SVV ( f )
Injecting this inequality in the SNR(n0 ) we obtain that
2
∫
S( f )
SNR(n0 ) ≤ √ df .
SVV ( f )
This shows that the SNR at n0 is upper bounded by a quantity which is independent of H( f ). Furthermore,
by the conditions for equality in the Cauchy-Schwartz inequality, we have that the bound is attained if and
only if
S( f )∗
H( f ) = k .
SVV ( f )
In the special case where v(n) is a white, then SVV ( f ) is a constant, say SVV ( f ) = σ 2 , and
H( f ) = k′ S( f )∗ .
By inverse Fourier transform, the corresponding impulse response is nothing but
h(n) = k′ s(−n)∗ ,
that is, the complex conjugate and reversed original waveform. This will be important to link the output
of the filter to an estimate of the cross-correlation function. For now, let us also observe that the general
transfer function H( f ) can be interpreted as a whitening operation followed by the matched filter for an
additive white noise:
S( f )∗ 1 S( f )∗
H( f ) = k =k√ ×√
SVV ( f ) SVV ( f ) SVV ( f )
| {z } | {z }
whitening matched filter
Finally, the output of the matched filter can be viewed as the computation of an estimate of the cross-
correlation function. Indeed, the output of the filter h(n) with input x is
y(n) = ∑ h(l)x(n − l) (12.7)
l
= ∑ s(−l)∗ x(n − l) (12.8)
l
= ∑ s(m)∗ x(n + m) (12.9)
m
= R̂xs (n), (12.10)
where R̂xs (n) is, up to a factor, an estimate of the cross-correlation between x and s. Applying this remark
to our initial mixture
y(n) = As(n − n0 ) + v(n)
we get that
z(n) = AR̂ss (n − n0 ) + R̂vs (n).
Finally, since v and s are uncorrelated, R̂vs (n) ≃ 0 and since R̂ss (n) is maximum at zero, we see that the
output will present a peak at n = n0 , thus enabling to locate the value of the delay n0 .
Page 194/255
12.6. APPLICATIONS 195
where the ti are the delays associated with each interface and Ai the reflection coefficients.
In order to localize the interfaces, we use a matched filter, which maximizes the signal to noise
ratio.
1. Implement the matched filter. Examine the different signals. Is it possible to detect the
positions of the interfaces on the time series? using the correlation functions? What is the
interest to choose a stimulation signal with a very peaky autocorrelation?
2. Consider a noisy version of the observation (add a Gaussian noise with standard deviation
A). Compute the output of the matched filter, with impulse response h(n) = s(−n) and
introduce a threshold at 3.3 times the noise standard deviation. Interpret this threshold.
Conclusions. Experiment with the level of noise, the number of samples, etc
def zeropad(v, N):
    """Zero-pad the vector v to length N.

    Parameters
    ----------
    v : array_like
        Input vector; len(v) <= N is assumed.
    N : int
        Length of the output vector.

    Returns
    -------
    ndarray
        Array of length N whose first len(v) entries are v and the
        remaining entries are zero.
    """
    a = np.zeros(N)
    # Plain slice assignment instead of fancy indexing with arange(len(v))
    a[:len(v)] = v
    return a
N = 1000
# I n t e r f a c e d e t e c t i o n by c r o s s −c o r r e l a t i o n
t = np . a r a n g e ( 1 0 0 )
A = 0.5
s = 1 ∗ sin (2 ∗ pi ∗ 0.01 ∗ (1 + 0.1 ∗ t ) ∗ t ) # emitted signal
figure ()
plot ( t , s )
t i t l e ( ’ Emitted s i g n a l ’ )
# L i s t of i n t e r f a c e s
pos = a r r a y ( [ 2 5 0 , 300 , 500 , 550 , 7 0 0 ] )
amp = a r r a y ( [ 1 , 1 , 1 , 1 , 0 . 5 ] )
g = z e r o s (N)
g [ p o s ] = amp
y = np . c o n v o l v e ( s , g )
z = y + A ∗ randn ( s i z e ( y ) )
figure (2)
plot (z)
t i t l e ( ’ Noisy o b s e r v a t i o n ’ )
figure (3)
plot (y)
t i t l e ( ’ Noiseless observation ’ )
Page 195/255
196 CHAPTER 12. RANDOM SIGNALS
Page 196/255
12.6. APPLICATIONS 197
from c o r r e l a t i o n i m p o r t x c o r r
sp = zeropad ( s , l e n ( z ) )
figure (4)
Rzs , l a g s = x c o r r ( z , s p )
p l o t ( l a g s , Rzs )
t i t l e ( ’ C r o s s −c o r r e l a t i o n ( n o i s y c a s e ) ’ )
figure (5)
Rys , l a g s = x c o r r ( y , s p )
p l o t ( l a g s , Rys )
t i t l e ( ’ C r o s s −c o r r e l a t i o n ( n o i s e l e s s c a s e ) ’ )
Finally, we introduce a threshold in order to eliminate the peaks due to the noise. For that, we compute
the threshold so as to have less than some fixed probability to exceed this level.
The method interval of an object stats.norm returns the endpoints of the range that contains
a fraction alpha of the distribution.
i n t e r v = s t a t s . norm . i n t e r v a l ( a l p h a = 0 . 9 9 9 , l o c =0 , s c a l e = 1)
print ( interv )
(-3.2905267314918945, 3.2905267314919255)
Rzs_th = a r r a y ( [
Rzs [ u ] i f ( Rzs [ u ] < i n t e r v s [ 0 ] o r Rzs [ u ] > i n t e r v s [ 1 ] ) e l s e 0
Page 197/255
198 CHAPTER 12. RANDOM SIGNALS
Page 198/255
12.6. APPLICATIONS 199
f o r u i n r a n g e ( LR )
])
f i g , ax = s u b p l o t s ( 1 , 1 , f i g s i z e = ( 8 , 3 ) )
ax . p l o t ( l a g s , R z s _ t h )
p r i n t ( " The p o s i t i o n o f i n t e r f a c e s a r e a t " , where ( R z s _ t h ! = 0 ) [ 0 ] + l a g s [ 0 ] )
The position of interfaces are at [249 250 251 299 300 301 499 500 501 549 550 551
Quick and Dirty thing to find the “center” of consecutive value ranges
def find_center(v):
    """Return the center of each run of (nearly) consecutive values in v.

    v is a sorted sequence of integers (e.g. indices returned by
    np.where). Successive values whose difference is 1 or 2 are
    considered to belong to the same run; for each run the center
    (beg + end) / 2 is returned.

    Parameters
    ----------
    v : sequence of int
        Sorted values to group into runs.

    Returns
    -------
    list of float
        Center of each detected run (empty list for empty input).
    """
    if len(v) == 0:
        # Original code crashed on empty input (v[0]); return no runs instead.
        return []
    centers = []
    beg = v[0]
    end = v[0]
    for k in range(1, len(v)):
        if (v[k] - v[k - 1]) in (1, 2):
            # Same run: track the actual last value. (The original code
            # incremented the end marker by 1 even for gaps of 2, so it
            # drifted and the final run could be silently dropped.)
            end = v[k]
        else:
            # Run ended: record its center and start a new run.
            centers.append((end + beg) / 2)
            beg = v[k]
            end = v[k]
    # Close the last run unconditionally.
    centers.append((end + beg) / 2)
    return centers
p o s i t = f i n d _ c e n t e r ( where ( R z s _ t h ! = 0 ) [ 0 ] + l a g s [ 0 ] )
p r i n t ( " P o s i t i o n s where t h e s i g n a l e x c e e d s t h r e s h o l d : \ n " . l j u s t ( 3 5 ) ,
where ( R z s _ t h ! = 0 ) [ 0 ] + l a g s [ 0 ] )
p r i n t ( " Detected i n t e r f a c e s positions : " . l j u s t (35) , posit )
p r i n t ( " True p o s i t i o n s ; " . l j u s t ( 3 5 ) , pos )
Page 199/255
200 CHAPTER 12. RANDOM SIGNALS
This problem involves actually two sub-problems that are very interesting on their own: - smoothing of the
additive noise, - inversion.
Let us first examine a simple experiment which points out the necessity of developing a rational approach
instead of adopting a naive one. We generate a random pulse train, filter it, and then reconstruct
the input signal by direct division by the transfer function:
X( f ) V(f)
S( f ) ≃ = S( f ) +
H( f ) H( f )
We consider both a noiseless case and a noisy case.
Illustrative experiment
N = 2000
a = −0.97
L = 50
s p o s = s t a t s . b e r n o u l l i . r v s ( l o c =0 , p = 0 . 6 , s i z e = i n t (N / L ) )
s = np . k r o n ( s p o s , np . o n e s ( L ) )
# x= s t a t s . norm ( 0 , 1 ) . r v s ( ( N) )
d = np . z e r o s (N)
d [0] = 1 # Dirac impulse
h = sig . l f i l t e r ([1 , 0.5 , 0.95] , [1 , a ] , d)
# h= s i g . l f i l t e r ( [ 1 , 0 . 6 , 0 . 9 5 , 1 . 0 8 , 0 . 9 6 ] , [ 1 , a ] , d )
H = f f t ( h , N)
X = fft (s) ∗ H
x = r e a l ( i f f t (X) )
plt . figure ()
plt . plot (x)
p l t . t i t l e ( " Observation " )
#
plt . figure ()
x _ r e c = r e a l ( i f f t (X / H) )
p l t . p l o t ( s , l a b e l =" True s i g n a l " )
p l t . p l o t ( x_rec , l a b e l =" R e c o n s t r u c t i o n " )
p l t . t i t l e ( " R e c o n s t r u c t i o n o f s i g n a l by d i r e c t i n v e r s i o n " )
p l t . ylim ([ −0.1 , 1 . 1 ] )
_ = p l t . legend ( )
# Noisy o b s e r v a t i o n
z = x + 0 . 2 5 ∗ s t a t s . norm ( 0 , 1 ) . r v s ( ( N) )
Z = fft (z)
plt . figure ()
plt . plot (z)
p l t . t i t l e ( " Noisy O b s e r v a t i o n " )
plt . figure ()
x _ r e c = r e a l ( i f f t ( Z / H) )
p l t . p l o t ( s , l a b e l =" True s i g n a l " )
Page 200/255
12.6. APPLICATIONS 201
Page 201/255
202 CHAPTER 12. RANDOM SIGNALS
p l t . p l o t ( 1 / a b s (H) )
[<matplotlib.lines.Line2D at 0x7f3fa40f5b38>]
y(n) = [w ∗ x](n)
The objective is to minimize the error e(n) = y(n) − s(n), and more precisely of the mean square error
[ ]
E e(n)2 .
Recall that
[ ] ∫
E e(n) = REE [0] = SEE ( f )d f .
2
RY −S,Y −S (k) = RYY (k) − RY S (k) − RSY (k) + RSS (k) (12.11)
SY −S,Y −S ( f ) = SYY ( f ) − SY S ( f ) − SSY ( f ) + SSS ( f ) (12.12)
Page 202/255
12.6. APPLICATIONS 203
Page 203/255
204 CHAPTER 12. RANDOM SIGNALS
From the transformation of the power spectrum by filtering and the symmetries of the cross-spectra, we
have
SY −S,Y −S ( f ) = |H( f )|2 |W ( f )|2 SSS ( f ) + |W ( f )|2 SVV ( f ) + H( f )W ( f )SSS ( f ) + H( f )∗W ( f )∗ SSS ( f ) + SSS ( f ).
• In the case H( f ) = 1, the problem reduces to a smoothing problem, that is to suppress the noise
without too much corrupting of the signal part. The Wiener filter reduces to
SSS ( f )
W(f) = . (12.16)
SSS ( f ) + SVV ( f )
In such case, we see that the transfer function tends to 1 if SSS ( f ) ≫ SVV ( f ) (frequency bands where the
signal is significantly higher than the noise), to zero if SSS ( f ) ≪ SVV ( f ) (much more noise than signal), and
otherwise realises a tradeoff guided by the signal-to-noise ratio in the frequency domain.
Experiment
We consider an example of optimum filtering, the Wiener smoother. Beginning with a noisy mixture x(n) =
s(n) + v(n), the goal is to find the best filter which minimizes the noise while preserving the signal: y(n) =
(h ∗ x)(n) ≃ s(n).
Simulate a signal
s(n) = exp(−at) sin(2π f0t + ϕ (ω )).
The corresponding implementation lines are
A=0.2; N=5000
t=arange(N)
s=exp(-0.001*t)*sin(2*pi*0.001*t+2*pi*rand(1))
w=A*randn(N)
x=s+w
Page 204/255
12.6. APPLICATIONS 205
Sss ( f )
H( f ) = ,
SSS ( f ) + SVV ( f )
where SSS ( f ) and SVV ( f ) are respectively the power spectra of the signal and of the noise. Implement
this filter and compute its output. In practice, what must be known in order to implement this filter? Is this
reasonable? Look at the impulse response and comment. What are the other difficulties for implementation?
A = 0.2
N = 5000
t = a r a n g e (N)
s = exp ( −0.001 ∗ t ) ∗ s i n ( 2 ∗ p i ∗ 0 . 0 0 1 ∗ t + 2 ∗ p i ∗ r a n d ( 1 ) )
w = A ∗ r a n d n (N)
figure (1)
p l o t (w)
t i t l e ( ’ Noise alone ’ )
x = s + w
figure (2)
plot ( s )
t i t l e ( ’ Signal ’ )
figure (3)
plot (x)
t i t l e ( ’ Observed s i g n a l ’ )
Implementation
S s s = 1 / N ∗ a b s ( f f t ( s ) ) ∗∗2
Svv = A ∗ A ∗ o n e s (N)
H = S s s / ( S s s + Svv )
xx = r e a l ( i f f t (H ∗ f f t ( x ) ) )
Page 205/255
206 CHAPTER 12. RANDOM SIGNALS
Page 206/255
12.6. APPLICATIONS 207
p l o t ( xx )
t i t l e ( ’ O u t p u t o f t h e Wiener s m o o t h e r ’ )
• One must know the spectra of the signal and of the noise. Here we have suppposed that the noise
is white and that we knew its variance. Furthermore, we assumed that the spectrum of the signal is
known.
• The impulse response may have an infinite support and is not causal. For implementation in real time,
one should select a causal solution. This requires performing a spectral factorization, and this is another
story; see page 208 for details.
and we look for the filter with impulse response w(n) such that y(n) = [w ∗ x](n) is as near as possible of
s(n): this can be formulated as the search for w which minimizes the mean square error
[ ]
E ([w ∗ x](n) − s(n))2 .
Page 207/255
208 CHAPTER 12. RANDOM SIGNALS
For a FIR filter, the convolution can be written as the scalar product
$$y(n) = [w \ast x](n) = \sum_{m=0}^{p-1} w(m)x(n-m) = w^t x(n).$$
Since
$$\frac{d\, w^t x(n)}{dw} = x(n),$$
we get that
dJ(w) [ ( )]
= 2E x(n) wt x(n) − s(n) (12.17)
dw [ ( )]
= 2E x(n) xt w(n) − s(n) , (12.18)
[ ]
= 2E x(n)x(n)t w − E [x(n)s(n)] . (12.19)
(12.20)
The first term involves a correlation matrix of x(n) and the second the vector of cross correlations between
x(n) and s(n). Denoting {
RXX = E [x(n)x(n)t ] ,
rSX = E [x(n)s(n)]
we obtain
RXX w = rSX
or
w = R−1
XX rSX
if RXX is invertible.
Page 208/255
13
Adaptive Filters
Adaptive filters are systems that are able to adapt their coefficients with respect to the properties of their
environment, in order to satisfy a given objective. Furthermore, they may also be able to adapt themselves
to modifications of the environment and track them. Many real-world applications employ adaptive filters, as
Hearing aids, Localization and tracking Active noise control (anti-noise), Noise suppression, Audio upmix
of stereo signals,Adaptive beamforming, MPEG audio coding, Non-linear echo cancellation, Adaptation of
neural networks, etc. The following figure, taken from [Ref][1], presents some possible applications:
We will first begin by describing the general filtering problem and derive the optimal solution, known
209
210 CHAPTER 13. ADAPTIVE FILTERS
as the Wiener filter. We will then explain how the solution can be obtained through iterative algorithms.
Finally, we will describe how these algorithms can be turned into adaptive filters.
[1]: M. Harteneck and R.W. Stewart, Adaptive Digital Signal Processing JAVA Teaching Tool, IEEE
TRANSACTIONS ON EDUCATION, MAY 2001, VOLUME 44, NUMBER 2, IEEDAB (ISSN 0018-
9359) online here
The classical formulation is as follows: Given a random signal u(n), we would like to find a transform
T {u} such that the result is as close as possible to some desired response d(n). We will restrict this
general problem on two aspects.
• First, we will only consider linear transforms of the sequence {u(n)}n=0..N−1 ; that is filterings
of u(n). Furthermore, we will even restrict ourselves to causal, finite impulse response filters with p
taps. We denote by w (with w for Wiener) the impulse response. For now, we assume that the system
is stationary, which implies that the impulse response does not depend on time n. Hence, the output
can be computed as the convolution product
p−1
y(n) = [w ∗ u](n) = ∑ w(m)u(n − m) (13.1)
m=0
• Second, the notion of “as close as” will be quantified by a cost function on the error
Any cost function could be used, such as |•|, |•|2 , |•|3 or even sinh e(n). . . Among these possibilities,
the square of the error yields interesting, closed-form solutions and simple computations.
Page 210/255
13.1. A GENERAL FILTERING PROBLEM 211
We can choose to work only with the sequences at hand and look at an integrated error such as
n1
Jls (w, n0 , n1 ) = ∑ e(n)2 (13.3)
n=n0
Such a criterion is called the Least Square criterion. We may also choose to work with the stochastic
processes on average, and consider a mean square error
[ ]
Jmse (w, n) = E e(n)2 . (13.4)
13.1.1 Introduction
Definitions
Observe that $J_{mse}(w,n)$ is a quadratic form in w. Therefore, the criterion admits a single global mini-
mum. To see this, let us develop the MSE:
[( )( )]
Jmse (w, n) = E wT u(n) − d(n) u(n)T w(n) − d(n) (13.5)
[ ] [ ]
= wT E u(n)u(n)T w − 2wT E [u(n)d(n)] + E d(n)2 (13.6)
= wT Ruu w − 2wT Rdu + σd2 (13.7)
where we denoted
{ [ ]
Ruu = E u(n)u(n)T the correlation matrix of u(n)
Rdu = E [d(n)u(n)] the correlation vector of d(n) and u(n)
We also used the fact that the dot product between two vectors is scalar and therefore equal to its
transpose: e.g. wT u(n) = u(n)T w.
From formula (13.7), it can be checked that the MSE can also be put into the form of a perfect square,
as
△ △ △ △
Jmse (w, n) = (w − w)T Ruu (w − w) − w T Ruu w +σd2 (13.8)
if
△ △
w : Ruu w = Rdu (13.9)
Page 211/255
212 CHAPTER 13. ADAPTIVE FILTERS
Since the quadratic form in (13.8) is always nonnegative, we see that the MSE is minimum if and only if
△
w = w = R−1
uu Rdu , (13.10)
△ △
Jmse (w, n) = σd2 − w T Rdu (13.11)
Alternatively, the minimum can also be found by equating the derivative of the criterion to zero. Indeed,
this derivative is [ ] [ ]
d dE e(n)2 de(n)
Jmse (w, n) = = 2E e(n) .
dw dw dw
Since e(n) = wT u(n) − d(n), its derivative with respect to w is u(n), and it remains
d
Jmse (w, n) = 2E [u(n)e(n)] (13.12)
dw [ ( )]
= 2E u(n) u(n)T w − d(n) (13.13)
= 2 (Ruu w − Rdu ) . (13.14)
Hence, the derivative is zero if and only if $R_{uu} w = R_{du}$, which is the solution (13.10).
Interestingly, we see that the optimum estimator depends only on the second order properties of the
desired response and the input sequence. This is a consequence of our choice of restricting ourselves to a
quadratic criterion and a linear transform.
that is
Jls (w, n0 , n1 ) = (U(n0 , n1 )w − d(n0 , n1 ))T (U(n0 , n1 )w − d(n0 , n1 )) .
Page 212/255
13.1. A GENERAL FILTERING PROBLEM 213
Now, it is a simple task to compute the derivative of this LS criterion with respect to w. One readily obtain
d
Jls (w, n0 , n1 ) = 2U(n0 , n1 )T (U(n0 , n1 )w − d(n0 , n1 )) ,
dw
The different matrices and vectors above depend on two indexes n0 and n1 . It is now time to discuss the
meaning of these indexes and the possible choices for their values. Suppose that the data are available on
N samples, from n = 0 to n = N − 1. When we want to compute the error e(k), with k < p, we see that the
result depends on unobserved values. The same kind of problem occurs if we want to compute the error for
k > N − 1. Therefore we face the problem of assigning a value to unobserved samples. A possibility is to take
a value of zero for unobserved values. Another possibility consists in assigning the values by periodization,
modulo N, of the available data. A last possibility is to avoid the situations which require the use of unknown
values.
The two main choices are the following:
• If we want to use only known values, it suffices to restrict the summation interval to the interval with
n0 = p − 1 and n1 = N − 1. The matrix U has dimensions (N − p) × p.This choice is sometimes known
as the covariance form.
• If we choose n0 = 0 and n1 = N − p − 2, with unknown values taken as zero, the corresponding choice
is called correlation form. The data matrix has now dimensions N + p − 1 × p.
[ ]
It is now easy to see that the generic term of U(n0 , n1 )T U(n0 , n1 ) i j has the form ∑n u(n − i)u(n − j),
that is, is (up to a factor) an estimate of the correlation Ruu (i − j). Consequently, we have an estimate of the
correlation matrix Ruu given by
[ ]
R̂uu = U(n0 , n1 )T U(n0 , n1 ) .
In the case of the choice of the correlation form for the data matrix, the resulting estimate of the correlation
matrix has Toeplitz symmetry. It is interesting to note that by construction, the estimated correlation matrix
is automatically non-negative definite. Similarly, Rdu can be estimated as
△ [ ]−1
w ls = U(n0 , n1 )T U(n0 , n1 ) U(n0 , n1 )T d(n0 , n1 ) = R̂−1
uu R̂du . (13.17)
Page 213/255
214 CHAPTER 13. ADAPTIVE FILTERS
We begin by simulating the problem. You may use the function lfilter to com-
pute the output of the system. Take for x a gaussian noise, np.random.normal or
np.random.randn, with unit variance on N points, and add a gaussian noise with scale
factor 0.1 on the output.
# DO IT YOURSELF!
#
from s c i p y . s i g n a l i m p o r t l f i l t e r
N=0 # u p d a t e t h i s
x=0 # u p d a t e t h i s
h t e s t =10∗ np . a r r a y ( [ 1 , 0 . 7 , 0 . 7 , 0 . 7 , 0 . 3 , 0 ] )
y0 =0 # ăFILL IN SOMETHING CORRECT HERE
y=0 # ăFILL IN SOMETHING CORRECT HERE
# y0 = # n o i s e l e s s o u t p u t
# y= # n o i s y o u t p u t
from s c i p y . s i g n a l i m p o r t l f i l t e r
# test
N=200
x=np . random . r a n d n (N)
h t e s t =10∗ np . a r r a y ( [ 1 , 0 . 7 , 0 . 7 , 0 . 7 , 0 . 3 , 0 ] )
#L= s i z e ( h t e s t )
# yo= z e r o s (N)
# for t in range (L,200) :
# yo [ t ] = h t e s t . d o t ( x [ t : t −L : − 1 ] )
# y=yo+ 0 . 1 ∗ r a n d n (N)
y= l f i l t e r ( h t e s t , [ 1 ] , x ) + 0 . 1 ∗ r a n d n (N)
plt . plot (y)
p l t . x l a b e l ( " Time " )
p l t . t i t l e ( " Observation " )
f i g c a p t i o n ( " System o u t p u t i n an i d e n t i f i c a t i o n p r o b l e m " )
Page 214/255
13.1. A GENERAL FILTERING PROBLEM 215
/usr/local/lib/python3.5/site-packages/scipy/signal/signaltools.py:1344: FutureWarn
out = out_full[ind]
Once this is done, we shall solve the normal equation (13.9). Of course, we first need to estimate the
correlation matrix Ruu and the correlation vector Rdu . This can be done with the functions xcorr and
toeplitz. Beware of the fact that xcorr returns two vectors and that the returned correlation vector is
the symmetric sequence with positive and negative indexes.
Now, in order to implement the identification procedure, one has to put the problem as a Wiener
problem and identify the input sequence u and the desired one d. Actually, here one should
simply observe that we look for a filter, which excited by the same x(n) should yield an output
z(n) as similar as y0 (n) as possible. So, what would you take for u and d?
One thus takes u=x and d=y (the wanted sequence is y0 (n), which shall be substituted by y(n) – since y0
is unknown).
We now have to implement the estimation of correlations and then compute the solution to the
normal equation. We note q + 1 the size of the filter (then of the correlation vector and matrix).
The inverse of a matrix can be obtained using the function inv in the module np.linalg.
The matrix mutiplication can be done using the .dot() method. Finally, you may evaluate the
performance by displaying the identified coefficients and by computing the MMSE according
to (13.11).
# DO IT YOURSELF!
from c o r r e l a t i o n i m p o r t x c o r r
Page 215/255
216 CHAPTER 13. ADAPTIVE FILTERS
from s c i p y . l i n a l g i m p o r t t o e p l i t z
from numpy . l i n a l g i m p o r t i n v
q=5
z=np . z e r o s ( q + 1 )
u=z # u p d a t e t h i s
d=z # u p d a t e t h i s
c=z # u p d a t e t h i s # c o r r e l a t i o n v e c t o r
Ruu=np . o u t e r ( z , z ) # u p d a t e t h i s
Rdu=z # u p d a t e t h i s
w=z # u p d a t e t h i s
p r i n t ( " E s t i m a t e d f i l t e r " , w)
p r i n t ( " True f i l t e r " , h t e s t )
# Minimum e r r o r
s i g m a 2 d =mean ( d ∗ ∗ 2 )
mmse= sigma2d−w . d o t ( Rdu )
p r i n t ( "MMSE: " , mmse )
from c o r r e l a t i o n i m p o r t x c o r r
from s c i p y . l i n a l g i m p o r t t o e p l i t z
from numpy . l i n a l g i m p o r t i n v
q=5
u=x
Page 216/255
13.1. A GENERAL FILTERING PROBLEM 217
d=y
c= x c o r r ( u , u , m a x l a g s =q ) [ 0 ] [ q : : ] # c o r r e l a t i o n v e c t o r
Ruu= t o e p l i t z ( c )
Rdu= x c o r r ( d , u , m a x l a g s =q ) [ 0 ] [ q : : ]
w= i n v ( Ruu ) . d o t ( Rdu )
p r i n t ( " E s t i m a t e d f i l t e r " , w)
p r i n t ( " True f i l t e r " , h t e s t )
# Minimum e r r o r
s i g m a 2 d =mean ( d ∗ ∗ 2 )
mmse= sigma2d−w . d o t ( Rdu )
p r i n t ( "MMSE: " , mmse )
Finally, it is interesting to transform the lines above in order to plot the MMSE error as a
function of q.
# Plot the MMSE as a function of the filter order q.
from correlation import xcorr
from scipy.linalg import toeplitz
from numpy.linalg import inv

u = x
d = y
qmax = 18                   # maximum value for q
mmse = np.zeros(qmax)       # initialize the vector of errors
for q in range(0, qmax):
    c = xcorr(u, u, maxlags=q)[0][q::]   # correlation vector
    Ruu = toeplitz(c)
    Rdu = xcorr(d, u, maxlags=q)[0][q::]
    w = inv(Ruu).dot(Rdu)
    # Minimum error for this order
    sigma2d = np.mean(d**2)
    mmse[q] = sigma2d - w.dot(Rdu)
print("MMSE: ", mmse)
plt.plot(range(0, qmax), mmse)
plt.xlabel("Order of the filter")
plt.ylabel("MMSE")
plt.title("MMSE as a function of the length of the identification filter")
figcaption("MMSE as a function of the length of the identification filter")
The evolution of the MMSE with respect to q shows that the MMSE is important while the length of
the identification filter is underestimated. The MMSE falls to a “floor” when the length is equal to or higher
than the true value. This offers an easy way to detect an “optimal” order for the identification.
Page 217/255
218 CHAPTER 13. ADAPTIVE FILTERS
Remark 1. Actually, the identification error always decreases when one increases the length of the filter,
that is add degrees of freedom to perform the identification. Usually, increasing the number of parameters
decreases the statistical stability of the estimate, and one has to make a trade-off between a sufficient number
of parameters to avoid a bias and a low number of parameters to lower the variance of the estimate. This
is the notion of bias-variance trade-off that appears in many areas of statistical signal processing. Thus,
for choosing an “optimal” order, one usually uses a composite criterion where the first term is the MMSE,
decreasing with the order, and a second term which increases with the order, thus penalizing high orders.
Page 218/255
13.2. THE STEEPEST DESCENT ALGORITHM 219
where ∇ f (x) denotes the gradient of f at x and ∇2 f (x) the Hessian. Restricting ourselves to the first order
approximation, we see that if we choose ∆xT ∇ f (x) < 0, then f (x + ∆x) < f (x), i.e. f decreases. The
higher |∆xT ∇ f (x)|, the larger the decrease. The scalar product is maximum when the two vectors
are colinear, and they must have opposite direction so as to obtain a negative scalar product. This yields
∆x = −∇ f (x).
The negative of the gradient is known as the direction of steepest descent. Usually, to keep ∆x small enough
for the validity of the Taylor approximation, one uses a small positive factor µ in front of the gradient. This
leads to the following iterative algorithm
which is known as the steepest descent algorithm. We begin with an initial guess x0 of the solution and
take the gradient of the function at that point. Then we update the solution in the negative direction of the
gradient and we repeat the process until the algorithm eventually converges where the gradient is zero. Of
course, this works if the function at hand possesses a true minimum, and even in that case, the solution
may correspond to a local minimum. In addition, the value of the step-size µ can be crucial for the actual
convergence and the speed of convergence to a minimum.
We give below a simple implementation of a steepest descent algorithm. Beyond formula (13.18), we
have refined by
• specifying a stopping rule: error less than a given precision err or number of iteration greater than a
maximum number of iterations itermax
• a line-search procedure line_search (True by default) which adapts the step-size in order to ensure
that the objective function actually decreases
• a verbose mode verbose (True by default) which prints some intermediary results.
• Gradient descent
• Conjugate gradients
def grad_algo(f, g, mu, x0=0, eps=0.001, grad_prec=0.0001, itermax=200,
              line_search=True, verbose=True):
    """Steepest descent algorithm (13.18): x_{k+1} = x_k - mu * g(x_k).

    Parameters
    ----------
    f : callable -- objective function
    g : callable -- gradient of f
    mu : float -- step-size
    x0 : scalar or array -- initial guess
    eps : float -- stop when the increment between two updates is below eps
    grad_prec : float -- threshold used to detect a stalled iteration
    itermax : int -- maximum number of iterations
    line_search : bool -- if True, halve the step until f actually decreases
    verbose : bool -- if True, print intermediary results

    Returns
    -------
    xk : array of shape (np.size(x0), k) -- the successive iterates
    """
    def update_grad(xk, mu):
        # One steepest-descent step.
        return xk - mu * g(xk)

    xk = np.zeros((np.size(x0), itermax))
    xk[:, 0] = x0
    err = 1
    k = 0
    while err > eps and k < itermax - 1:
        err = np.linalg.norm(xk[:, k] - update_grad(xk[:, k], mu), 1)
        xk[:, k + 1] = update_grad(xk[:, k], mu)
        # Stop if the iteration diverged to nan/inf.
        if np.any(np.isnan(xk[:, k + 1])) or np.any(np.isinf(xk[:, k + 1])):
            break
        m = 0
        # Line search: look for a step that ensures that the objective
        # function decreases.
        if line_search:
            while f(xk[:, k + 1]) > f(xk[:, k]):
                m = m + 1
                xk[:, k + 1] = update_grad(xk[:, k], mu * 0.5**m)
        # Avoid staying stalled: if two successive gradients nearly cancel,
        # slightly reduce the step-size and redo the update.
        if np.linalg.norm(g(xk[:, k]) + g(xk[:, k - 1]), 1) < grad_prec:
            mu = mu * 0.99
            xk[:, k + 1] = update_grad(xk[:, k], mu)
        if verbose:
            if np.size(x0) == 1:
                print("current solution {:2.2f}, error: {:2.2e}, gradient "
                      "{:2.2e}, objective {:2.2f}".format(
                          xk[0, k + 1], err, g(xk[0, k + 1]), f(xk[0, k + 1])))
            else:
                print("error: {:2.2e}, gradient {:2.2e}, objective {:2.2f}".
                      format(err, np.linalg.norm(g(xk[:, k + 1]), 2),
                             f(xk[:, k + 1])))
        k = k + 1
    return xk[:, :k]
Let us illustrate the SDA in the case of an bivariate quadratic function. You may experiment by modifying
the initial guess and the step-size µ .
def f(x):
    """Objective function: squared Euclidean norm of x."""
    return np.sum(x**2)


def ff(x):
    """Vectorized version of f: apply f to each element of the iterable x."""
    return np.array([f(xx) for xx in x])
# Test # -------------------------------------------------------------------
def tst(ini0, ini1, mu):
    """Run the SDA from (ini0, ini1) with step mu and plot the iterates."""
    eps = 0.001
    xk = grad_algo(f, g, mu=mu, x0=[ini0, ini1], eps=0.001, grad_prec=0.0001,
                   itermax=200, line_search=False, verbose=False)
    clear_output(wait=True)
    x = np.linspace(-5, 5, 400)
    plt.plot(x, ff(x))
    x = xk[0, :]
    plt.plot(x, ff(x), 'o-')
    x = xk[1, :]
    plt.plot(x, ff(x), 'o-')


def tsto(val):
    # Widget callback: re-run tst with the current widget values.
    tst(x0.value, x1.value, mu.value)


x0 = widgets.FloatText(value=3.5)
x1 = widgets.FloatText(value=-4.2)
mu = widgets.FloatSlider(min=0, max=1.4, step=0.01, value=0.85)
# c = widgets.ContainerWidget(children=(ini0, ini1))
x0.observe(tsto, names=["value"])
x1.observe(tsto, names=["value"])
mu.observe(tsto, names=["value"])
display(widgets.VBox([x0, x1, mu]))
# _ = interact(tst, ini0=x0, ini1=x1, mu=mu)
def g(x):
    """Gradient used in the SDA demo: the cubic (x-1)(x+3)(x-3)."""
    return ((x - 1) * (x + 3) * (x - 3))  # 2*x #
# Test # -------------------------------------------------------------------
def tst(ini0, ini1, mu):
    """Run the SDA from (ini0, ini1), plot iterates and a contour map of f."""
    eps = 0.001
    xk = grad_algo(f, g, mu=mu, x0=[ini0, ini1], eps=0.001, grad_prec=0.0001,
                   itermax=200, line_search=False, verbose=False)
    x = np.linspace(-5, 5, 400)
    plt.plot(x, ff(x))
    x = xk[0, :]
    plt.plot(x, ff(x), 'o-')
    x = xk[1, :]
    plt.plot(x, ff(x), 'o-')
    plt.figure()
    x = np.linspace(-5, 5, 100)
    xx, yy = np.meshgrid(x, x)
    z = np.zeros((len(xx), len(yy)))
    # Evaluate f on the grid (note the [n, m] ordering for contour).
    for m, a in enumerate(x):
        for n, b in enumerate(x):
            z[n, m] = f(np.array([a, b]))
    h = plt.contour(x, x, z, 20)
    plt.plot(xk[0, :], xk[1, :], 'o-')


x0 = widgets.FloatText(value=0.5)   # or -1.5
x1 = widgets.FloatText(value=1.2)   # 0.8
mu = widgets.FloatSlider(min=0, max=1.4, step=0.01, value=0.07)
# c = widgets.ContainerWidget(children=(ini0, ini1))
_ = interact(tst, ini0=x0, ini1=x1, mu=mu)
Page 221/255
222 CHAPTER 13. ADAPTIVE FILTERS
Page 222/255
13.3. APPLICATION TO THE ITERATIVE RESOLUTION OF THE NORMAL EQUATIONS 223
# Plot the potential whose derivative is the cubic used as gradient above.
x = np.linspace(-5, 5, 400)
y = (x - 1) * (x + 3) * (x - 3)               # the gradient (overwritten below)
y = x**4 / 4 - x**3 / 3 - 9 * x**2 / 2 + 9 * x  # its antiderivative
plt.plot(x, y)
[<matplotlib.lines.Line2D at 0x7fd4ec5a77b8>]
# Symbolic check: expand the cubic, integrate it and plot it.
import sympy
x = sympy.symbols('x')
e = sympy.expand((x - 1) * (x + 3) * (x - 3))
print(e)
sympy.integrate(e)
sympy.plot(e)
<sympy.plotting.plot.Plot at 0x7fd4e9efb3c8>
Definitions
$$J_{mse}(w) = \mathbb{E}\left[ e(n)^2 \right] \tag{13.19}$$
$$\phantom{J_{mse}(w)} = w^T R_{uu} w - 2 w^T R_{du} + \sigma_d^2 \tag{13.20}$$
Page 223/255
224 CHAPTER 13. ADAPTIVE FILTERS
The derivative is zero if and only if $R_{uu} w = R_{du}$, which is the normal equation.
Instead of directly solving the normal equation by taking the inverse of Ruu , we can also minimize the
original criterion using a SDA algorithm. Since the MMSE criterion is a quadratic form in w, it has a unique
minimum w⋆ which will be reached regardless of the initial condition.
Beginning with the general formulation (13.18) of the SDA, and using the expression of the gradient of
the MMSE, we readily obtain
depends on time; in such a case, the SDA would depend on both iterations and time.
Page 224/255
13.3. APPLICATION TO THE ITERATIVE RESOLUTION OF THE NORMAL EQUATIONS 225
/usr/local/lib/python3.5/site-packages/scipy/signal/signaltools.py:1344: FutureWarn
out = out_full[ind]
Implement a function that iterates the SDA, beginning with an initial condition winit until the (norm
of the) increment between two successive updates is less than a given precision eps (use a while loop).
The syntax of the function should be
def sda(Ruu, Rdu, winit, mu=0.05, eps=0.001, verbose=False):
    """Iterate the SDA w <- w - mu (Ruu w - Rdu) from winit until the norm of
    the increment between two successive updates is less than eps.

    Returns
    -------
    w : array -- the identified filter
    k : int -- the number of iterations performed
    """
    itermax = 2000
    err = (100, 100)   # dummy initial error, large enough to enter the loop
    k = 0
    w = winit
    while np.linalg.norm(err, 2) > eps and k < itermax - 1:
        err = (Ruu.dot(w) - Rdu)   # gradient of the MSE criterion
        w = w - mu * err
        k += 1
        if verbose:
            print("Iteration {0:d}, error: {1:2.2e}".format(
                k, np.linalg.norm(err, 2)))
    return w, k
We can also study the behavior and performance of the SDA as a function of the step-size µ .
# Study the number of SDA iterations needed as a function of the step-size mu.
from correlation import xcorr
from scipy.linalg import toeplitz
from numpy.linalg import inv

u = x
d = y
q = 6
c = xcorr(u, u, maxlags=q)[0][q::]   # correlation vector
Ruu = toeplitz(c)
Rdu = xcorr(d, u, maxlags=q)[0][q::]
wopt = inv(Ruu).dot(Rdu)             # direct solution, for reference
k = 0
mu_iter = np.arange(0, 0.51, 0.01)
niter = np.empty(np.shape(mu_iter))
for mu in mu_iter:
    w, nbiter = sda(Ruu, Rdu, winit=np.zeros(q + 1), mu=mu, eps=0.001,
                    verbose=False)
    niter[k] = nbiter
    k += 1
    # print("for mu = {0:1.3f}, number of iterations: {1:}".format(mu, nbiter))
print("Last identified filter", w)
print("true filter", htest)
plt.plot(mu_iter, niter)
plt.xlabel("$\mu$")
plt.ylabel("Number of iterations")
figcaption("Number of iterations of the gradient algorithm as a function of "
           "$\mu$",
           label="fig:itergrad")
Page 226/255
13.3. APPLICATION TO THE ITERATIVE RESOLUTION OF THE NORMAL EQUATIONS 227
We observe that the number of iterations needed to obtain the convergence (up to a given precision) es-
sentially decreases with µ , up to a minimum. After this minimum, the number of iterations quickly increases,
up to a value of µ where the algorithm begins to diverge.
Page 227/255
228 CHAPTER 13. ADAPTIVE FILTERS
It is then immediate to express the error at iteration n + 1 in terms of the initial error v(0):
Clearly, if the algorithm converges, the error shall tend to zero and, in so doing, forget the initial conditions.
Here, the error decreases to zero if (I − µ Ruu )n+1 tends to the null matrix. This happens if all the eigenvalues
of (I − µ Ruu ) have a modulus inferior to 1. To see this, let us introduce the eigen-decomposition of Ruu :
Ruu = V Λ VH
where V is the matrix of right eigenvectors of Ruu , and Λ the corresponding diagonal matrix of eigenvalues.
The superscript H indicates the conjugate transposition (that is transposition plus conjugation). In the case
of a correlation matrix, the eigenvalues are all non-negative, and the eigenvectors can be chosen normed and
orthogonal to each other. In other terms, V is unitary:
VVH = I or V−1 = VH .
Therefore, (I − µ Ruu ) can be put under the form V (I − µ Λ) VH . This shows that the eigenvalues of the
matrix have the form 1 − µλi , where the λi are the eigenvalues of the correlation matrix. For the power
(n + 1) we then obtain
(I − µ Ruu )n+1 = V (I − µ Λ)n+1 VH .
Hence we see that this matrix will converge to zero if and only if
- If 1 − µλi > 0, the condition |1 − µλi | < 1 yields µλi > 0, and therefore, since λi ≥ 0, µ ≥ 0; - If 1 − µλi < 0, we
obtain µλi − 1 < 1, so that µ < 2/λi . Since this must be true for all λi , we can only keep the most restrictive
inequality: µ < 2/λmax , where λmax denotes the maximum eigenvalue.
Finally, we obtain the following condition
2
0≤µ <
λmax
Optimum step-size
From (13.3.1), we see that the speed of convergence will be limited by the slowest eigenvalue, that is by the
eigenvalue whose modulus is the nearest to one. Hence, in order to minimize the convergence time, we have
to select the maximum of the 1 − µλk , with respect to k, and minimize that value with respect to µ . Hence
we face a minimax problem:
min max |1 − µλk |
µ k
Suppose that there exists a µopt that realizes the minimum with respect to µ . For µ > µopt , we then have
eigenvalue λ . In such a case, we have vH Ruu v = vH λ v = λ ||v||2 . Since we just seen that vH Ruu v ≥ 0, we deduce that all the
eigenvalues λ are non-negative.
Page 228/255
13.3. APPLICATION TO THE ITERATIVE RESOLUTION OF THE NORMAL EQUATIONS 229
It is worth recalling that we introduced the iterative algorithm in order to avoid the direct inversion of
the correlation matrix, possibly for computational load reasons. However, computing the eigenvalues of the
correlation matrix is at least as complicated as computing the inverse. Thus we do not gain anything if we
compute the optimum step-size (13.3.1). Fortunately, we can use the following value:
2
µ= ,
Tr [Ruu ]
where Tr denotes the trace operator, that is the sum of the eigenvalues. Since we know that the trace is also
the sum of the terms on the main diagonal, and since the matrix is Toeplitz, we also have
2
µ= ,
pRuu (0)
[ ]
where Ruu (0) = E |u(n)|2 and p is the dimension of the correlation matrix.
Sn = B0 + B + B2 + . . . + Bk + . . . + Bn
is given by
Sn = (1 − B)−1 (1 − Bn+1 ) . (13.26)
If the spectral radius of B is less than 1, then limn→∞ Bn = 0, and
S∞ = (1 − B)−1 .
Page 229/255
230 CHAPTER 13. ADAPTIVE FILTERS
B0 + B + B2 + . . . + Bk + . . .
where B is any matrix. The sum of the first n terms of this geometric series is given by (1). Of course, we
also have
Therefore we have
(B − 1)Sn = −1 + Bn+1 ,
and finally the result (13.26) follows after applying the left inverse of (B − 1) to both sides.
Application – This can be applied for instance to the matrix B = 1 − µ A. Here it gives
+∞
µ S∞ = µ ∑ (1 − µ A)k = A−1
k=0
Remark 2. If B has a spectral radius less than one, then (1 − B) is invertible. Consider the eigendecompo-
sition of B as:
B = V ΛV −1 ,
where V is the matrix of right eigenvectors of B, and Λ the corresponding diagonal matrix of eigenvalues.
Then (1 − B) = (VV −1 −V ΛV −1 ) = V (1 − Λ)V −1 . The last relation is nothing but a possible eigendecompo-
sition of (1 − B). This shows that the corresponding eigenvalues have the form 1 − λi . If all the eigenvalues
have a modulus inferior to 1, then 1 − λi is never equal to zero and the matrix (1 − B) is invertible.
Let us illustrate numerically that the sum of the geometric series generated by B is indeed (I − B)−1
# We generate a random matrix B, compute its eigendecomposition and normalize
# by the maximum eigenvalue. Therefore, the spectral radius is inferior to 1,
# and the property applies.
p = 50
B = np.random.randn(p, p)
L, V = np.linalg.eig(B)
ll = np.max(np.abs(L))
B = B / (1.1 * ll)   # spectral radius is now 1/1.1 < 1
# Compare the partial sums S_n = I + B + ... + B^n with (I - B)^{-1}.
# NOTE(review): the initializations below are not visible in the source
# (lost at a page break) and were reconstructed -- confirm N against the
# original notebook.
N = 100                               # number of terms in the partial sum
IBi = np.linalg.inv(np.eye(p) - B)    # the limit (I - B)^{-1}
S = np.zeros((p, p))                  # running partial sum
C = np.eye(p)                         # current power B**k
err = np.zeros(N)
for k in np.arange(N):
    S = S + C
    C = C.dot(B)
    err[k] = np.linalg.norm(IBi - S, 2)
By the property (13.3.2), the inverse of the correlation matrix can be computed as
$$R_{uu}^{-1} = \mu \sum_{k=0}^{+\infty} (1 - \mu R_{uu})^k.$$
that is also
w(n + 1) = w(n) − µ (Ruu w(n) − Rdu )
Page 231/255
232 CHAPTER 13. ADAPTIVE FILTERS
Hence, we obtain an iterative formula for computing the solution of the normal equation (13.9), without
explicitly computing the inverse of the correlation matrix. It is an exact algorithm, which converges to the
true solution:
△
lim w(n) = w = R−1 uu Rdu .
n→∞
As we saw above, this algorithm also appears as a steepest descent algorithm applied to the minimization of
the Mean Square Error.
A few references –
[http://nowak.ece.wisc.edu/ece830/ece830\\_spring13\\_adaptive\\_filtering.pdf](htt
[http://www.roma1.infn.it/exp/cuore/pdfnew/ch07.pdf](http://www.roma1.infn.it/exp/c
[http://www.ece.utah.edu/~mathews/ece6550/chapter4.pdf](http://www.ece.utah.edu/~ma
[http://en.wikipedia.org/wiki/Least\\_mean\\_squares\\_filter#Normalised\\_least\\_
# Enable interactive matplotlib figures in the notebook via mpld3,
# and restore the default warning filter.
import mpld3
mpld3.enable_notebook()
import warnings
warnings.simplefilter('default')
Page 232/255
13.4. ADAPTIVE VERSIONS 233
which corresponds to filtering with a sliding (rectangular) window of length 2L + 1. Note that it is possible
to compute this recursively as
The following lines simulate a non-stationary signal with time-varying power. We implement the expo-
nential average for estimating the power. You should experiment with the values of λ .
import m a t p l o t l i b . pyplot as p l t
from I P y t h o n . d i s p l a y i m p o r t c l e a r _ o u t p u t , d i s p l a y , HTML, Image , J a v a s c r i p t
%m a t p l o t l i b i n l i n e
i m p o r t numpy a s np
import ipywidgets as widgets
from i p y w i d g e t s i m p o r t i n t e r a c t , i n t e r a c t i v e
N=1000
# mpld3 . d i s a b l e _ n o t e b o o k ( )
from s c i p y . s p e c i a l i m p o r t e x p i t # l o g i s t i c f u n c t i o n
from I P y t h o n . d i s p l a y i m p o r t d i s p l a y , c l e a r _ o u t p u t
x=np . random . n o r m a l ( s i z e =N)
t =np . l i n s p a c e ( −6 ,6 ,N)
z=x ∗ ( 2 ∗ e x p i t ( t ) −1)
d e f p l t _ v s _ l a m b d a ( lamb ) :
Page 233/255
234 CHAPTER 13. ADAPTIVE FILTERS
We will substitute the true values with estimated ones. An important remark is that the result of the normal
equation is insensitive to a scale factor on the estimates. It is thus possible to estimate the correlation matrix
and vector using a sliding average
{
R̂uu (n) = ∑Ll=−L u(n − l)u(n − l)H
R̂du (n) = ∑Ll=−L d(n − l)u(n − l)
or by an exponential mean
n
R̂uu (n) = ∑ λ l−n u(l)u(l)H = λ R̂uu (n − 1) + u(n)u(n)H
l=0
which yields {
R̂uu (n) = ∑nl=0 λ l−n u(l)u(l)H = λ R̂uu (n − 1) + u(n)u(n)H
R̂du (n) = λ R̂du (n − 1) + d(n)u(n).
Page 234/255
13.4. ADAPTIVE VERSIONS 235
{
R̂uu (n) = u(n)u(n)H
R̂du (n) = d(n)u(n).
This merely consists in suppressing the expectations in the theoretical formulas. So doing, we obtain for-
mulas which directly depend on the data, with no need to know something on the theoretical statistics, and
which also depend on time, thus confering adaptivity to the algorithm. Plugging these estimates in the SDA,
we obtain
# Generate the test signals for the LMS identification experiment:
# colored input x, true filter htest, noiseless output y0, noisy output y.
from scipy.signal import lfilter
# test
figplot = False
N = 800
x = lfilter([1, 1], [1], np.random.randn(N))
htest = 10 * np.array([1, 0.7, 0.7, 0.7, 0.3, 0])
y0 = lfilter(htest, [1], x)
y = y0 + 0.1 * np.random.randn(N)
if figplot:
    plt.plot(y)
    plt.xlabel("Time")
    plt.title("Observation")
    figcaption("System output in an identification problem")
/usr/local/lib/python3.5/site-packages/scipy/signal/signaltools.py:1344: FutureWarn
out = out_full[ind]
Now, since one should do it at least one time, try to implement a LMS algorithm. You will define a
function with the following syntax:
def lms(d, u, w, mu):
    """
    Implements a single iteration of the stochastic gradient (LMS)\n
    :math:`w(n+1)=w(n)+\\mu u(n)\\left(d(n)-w(n)^T u(n)\\right)`

    Input:
    ======
    d : desired sequence at time n
    u : input of length p
    w : wiener filter to update
    mu : adaptation step

    Returns:
    =======
    w : updated filter
    err : d - dest
    dest : prediction = :math:`u(n)^T w`
    """
    dest = 0
    err = d - dest
    #
    # DO IT YOURSELF!
    #
    return (w, err, dest)
def lms(d, u, w, mu):
    """
    Implements a single iteration of the stochastic gradient (LMS)\n
    :math:`w(n+1)=w(n)+\\mu u(n)\\left(d(n)-w(n)^T u(n)\\right)`

    Input:
    ======
    d : desired sequence at time n
    u : input of length p
    w : wiener filter to update
    mu : adaptation step

    Returns:
    =======
    w : updated filter
    err : d - dest
    dest : prediction = :math:`u(n)^T w`
    """
    dest = u.dot(w)          # prediction with the current filter
    err = d - dest           # instantaneous error
    w = w + mu * u * err     # stochastic-gradient update
    return (w, err, dest)
Page 236/255
13.4. ADAPTIVE VERSIONS 237
Identification procedure
• Begin by some direct commands (initializations and a for loop on the time variable) for identifying
the filter; once this works you will implement the commands as a function ident
• If necessary, the function squeeze() enables removing single-dimensional entries from the shape
of an n-D array (e.g. transforms an array (3,1,1) into a vector of dimension 3)
In order to evaluate the algorithm behavior, you will plot the estimation error, the evolution of the
coefficients of the identified filter during the iterations of the algorithm; and finally the quadratic error
between the true filter and the identified one. This should be done for several orders p (the exact order is
unknown. . . ) and for different values of the adaptation step µ .
• The quadratic error can be evaluated simply thanks to a comprehension list according to
Errh=[sum(he-w[:,n])**2 for n in range(N+1)]
# Results: compare the noiseless output with the LMS-estimated one.
plt.figure(1)
tt = np.arange(NN)
plt.plot(tt, y0[:NN], label='Initial Noiseless Output')
plt.plot(tt, yest[:NN], label="Estimated Output")
plt.xlabel('Time')
figcaption("Comparison of true output and estimated one after identification",
           label="fig:ident_lms_compareoutputs")
Page 237/255
238 CHAPTER 13. ADAPTIVE FILTERS
# Direct LMS identification loop (before wrapping it into a function).
mu = 0.05   # an initial value for mu
L = 6       # size of identified filter (true size is p)
NN = 200    # number of iterations
err = np.zeros(NN)
w = np.zeros((L, NN + 1))
yest = np.zeros(NN)
for t in np.arange(L, NN):
    # u(t) is the last L input samples, most recent first.
    (w[:, t + 1], err[t], yest[t]) = lms(y[t], x[t:t - L:-1], w[:, t], mu)
Page 238/255
13.4. ADAPTIVE VERSIONS 239
Figure 13.7: Comparison of true output and estimated one after identification
# Pad/truncate the true filter to length L so it can be compared with w.
# NOTE(review): the initialization of newhtest is missing at this point in
# the extracted source (it appears in a later cell); reconstructed here.
newhtest = np.zeros(L)
LL = np.min([np.size(htest), L])
newhtest[:LL] = htest[:LL]
# Results:
plt.figure(1)
tt = np.arange(NN)
plt.plot(tt, y0[:NN], label='Initial Noiseless Output')
plt.plot(tt, yest[:NN], label="Estimated Output")
plt.xlabel('Time')
figcaption("Comparison of true output and estimated one after identification",
           label="fig:ident_lms_compareoutputs")
We can now implement the identification as a function on its own, which simply makes some initializa-
tions and use a loop on the LMS. Implement this function according to the following syntax.
def ident(observation, input_data, mu, p=20, h_initial=np.zeros(20)):
    """Identification of an impulse response from an observation
    `observation` of its output, and from its input `input_data`.
    `mu` is the adaptation step.

    Inputs:
    =======
    observation : array
        output of the filter to identify
    input_data : array
        input of the filter to identify
    mu : real
        adaptation step
    p : int (default=20)
        order of the filter
    h_initial : array (default h_initial=zeros(20))
        initial guess for the filter

    Outputs:
    ========
    w : array
        identified impulse response
    err : array
        estimation error
    yest : array
        estimated output
    """
    N = np.size(input_data)
    err = np.zeros(N)
    w = np.zeros((p, N + 1))
    yest = np.zeros(N)
    #
    # DO IT YOURSELF!
    #
    return (w, err, yest)
def ident(observation, input_data, mu, p=20, h_initial=np.zeros(20),
          normalized=False):
    """Identification of an impulse response from an observation
    `observation` of its output, and from its input `input_data`.
    `mu` is the adaptation step.

    Inputs:
    =======
    observation : array
        output of the filter to identify
    input_data : array
        input of the filter to identify
    mu : real
        adaptation step
    p : int (default=20)
        order of the filter
    h_initial : array (default h_initial=zeros(20))
        initial guess for the filter
    normalized : boolean (default False)
        compute the normalized LMS instead of the standard one

    Outputs:
    ========
    w : array
        identified impulse response
    err : array
        estimation error
    yest : array
        estimated output
    """
    N = np.size(input_data)
    # Remove single-dimensional entries so the 1-D slicing below works.
    input_data = np.squeeze(input_data)   # reshape(input_data, (N))
    observation = np.squeeze(observation)
    err = np.zeros(N)
    w = np.zeros((p, N + 1))
    yest = np.zeros(N)
    w[:, p] = h_initial
    for t in range(p, N):
        if normalized:
            # Normalize the step by the input power (plus a small constant
            # to avoid division by zero).
            mun = mu / (np.dot(input_data[t:t - p:-1],
                               input_data[t:t - p:-1]) + 1e-10)
        else:
            mun = mu
        (w[:, t + 1], err[t], yest[t]) = lms(observation[t],
                                             input_data[t:t - p:-1],
                                             w[:, t], mun)
    return (w, err, yest)
# Quadratic error between the true filter and the identified one over time.
newhtest = np.zeros(L)
LL = np.min([np.size(htest), L])
newhtest[:LL] = htest[:LL]
NN = np.min([np.size(yest), 200])
errh = [sum((newhtest - w[:, t])**2) for t in range(NN)]
plt.plot(tt, errh, label='Quadratic error on h')
plt.legend()
_ = plt.xlabel('Time')
print("Identified filter: ", w[:, -1])
Page 242/255
13.4. ADAPTIVE VERSIONS 243
def ident(observation, input_data, mu, p=20, h_initial=np.zeros(20),
          normalized=False):
    """Identification of an impulse response from an observation
    `observation` of its output, and from its input `input_data`.
    `mu` is the adaptation step.

    Inputs:
    =======
    observation : array
        output of the filter to identify
    input_data : array
        input of the filter to identify
    mu : real
        adaptation step
    p : int (default=20)
        order of the filter
    h_initial : array (default h_initial=zeros(20))
        initial guess for the filter
    normalized : boolean (default False)
        compute the normalized LMS instead of the standard one

    Outputs:
    ========
    w : array
        identified impulse response
    err : array
        estimation error
    yest : array
        estimated output
    """
    N = np.size(input_data)
    err = np.zeros(N)
    w = np.zeros((p, N + 1))
    yest = np.zeros(N)
    w[:, p] = h_initial
    for t in np.arange(p, N):
        if normalized:
            # For the normalized LMS, stability requires mu < 2.
            assert mu < 2, "In the normalized case, mu must be less than 2"
            mun = mu / (np.dot(input_data[t:t - p:-1],
                               input_data[t:t - p:-1]) + 1e-10)
        else:
            mun = mu
        (w[:, t + 1], err[t], yest[t]) = lms(observation[t],
                                             input_data[t:t - p:-1],
                                             w[:, t], mun)
    return (w, err, yest)
Stability of results
It is very instructive to look at the reproductibility of results when the data change. Let µ fixed and generate
new data. Then apply the identification procedure and plot the learning curve.
# Reproducibility study: rerun the identification on 30 fresh data sets and
# superimpose the learning curves.
p = 6   # <-- actual length of the filter
for ndata in range(30):
    ## Generate new datas
    N = 200
    x = lfilter([1, 1], [1], np.random.randn(N))
    htest = 10 * np.array([1, 0.7, 0.7, 0.7, 0.3, 0])
    y0 = lfilter(htest, [1], x)
    y = y0 + 0.1 * np.random.randn(N)
    iterations = np.arange(NN + 1)
    # --------------------
    for mu in [0.01]:
        (w, erreur, yest) = ident(y, x, mu, p=p, h_initial=np.zeros(p))
        # Fix: square inside the sum, consistently with the errh computation
        # above (the extracted source had sum(htest - w[:, n])**2).
        Errh = [sum((htest - w[:, n])**2) for n in range(NN + 1)]
        plt.plot(iterations, Errh, label="$\mu={}$".format(mu))
plt.xlim([0, NN + 1])
/usr/local/lib/python3.5/site-packages/scipy/signal/signaltools.py:1344: FutureWarn
out = out_full[ind]
The data are random; the algorithm is stochastic and so is the learning curve! Fortunately, we still check
that the algorithms converge. . . since the error goes to zero. So, it works.
# Generate new data and compare learning curves for several step-sizes mu
N = 200
NN = N  # number of recorded adaptation steps (w has N+1 columns)
x = lfilter([1, 1], [1], np.random.randn(N))
htest = 10 * np.array([1, 0.7, 0.7, 0.7, 0.3, 0])
y0 = lfilter(htest, [1], x)
y = y0 + 0.1 * randn(N)  # noisy observation
iterations = np.arange(NN + 1)
# --------------------
for mu in [0.01, 0.02, 0.05, 0.081]:
    (w, erreur, yest) = ident(y, x, mu, p=p, h_initial=zeros(p))
    # Norm of the misadjustment htest - w(n): sum of squares, not square of sum
    Errh = [sum((htest - w[:, n]) ** 2) for n in range(NN + 1)]
    # plot against `iterations`; the original passed the builtin `iter`,
    # which was never assigned here
    plt.plot(iterations, Errh, label=r"$\mu={}$".format(mu))
plt.xlim([0, NN + 1])
plt.legend()
plt.title("Norm of the error to the optimum filter")
_ = plt.xlabel("Iterations")
/usr/local/lib/python3.5/site-packages/scipy/signal/signaltools.py:1344: FutureWarn
out = out_full[ind]
Tracking capabilities
With a constant step-size, the LMS never converges: as long as an error exists, the filter keeps being updated.
A consequence of this fact is that the LMS keeps tracking capabilities, which are especially useful in a
non-stationary context. In the identification context, it is possible that the filter to be identified varies over
time. In such a case, the algorithm must be able to track these modifications. Such an example is simulated
below, where the impulse response is modulated by a cos(), according to
Page 245/255
246 CHAPTER 13. ADAPTIVE FILTERS
# ## Slow non-stationarity: impulse response modulated by (1 + cos(2 pi t / N))
N = 1000
u = np.random.randn(N)
y = np.zeros(N)
htest = 10 * np.array([1, 0.7, 0.7, 0.7, 0.3, 0])
L = size(htest)
for t in np.arange(L, N):
    gain = 1 + cos(2 * pi * t / N)  # slowly varying modulation of the response
    y[t] = dot(gain * htest, u[t:t - L:-1])
y += 0.01 * np.random.randn(N)  # small observation noise
plt.figure()
plt.plot(y)
_ = plt.title("Observed Signal")
Then, we can test the identification procedure for this non-stationary signal. We check that the error
indeed goes to zero, and that the identified filter seems effectively modulated by a cosine.
p = 7
# LMS identification of the time-varying response
(w, err, yest) = ident(y, u, mu=0.1, p=p, h_initial=zeros(p))
# (w, err, yest) = ident(y, u, mu=1, p=p, h_initial=zeros(p), normalized=True)

# Identification error over time
plt.figure(1)
clf()
plt.plot(err)
plt.title('Identification error')
figcaption("Identification error in the nonstationary case", label="fig:error_ns_case")

# Estimated coefficients vs the true modulated response
plt.figure(2)
plt.clf()
t = np.arange(0, N + 1)
true_ns_h = np.outer((1 + cos(2 * pi * t / N)), htest)
plt.plot(t, w.T, lw=1)
plt.plot(t, true_ns_h, lw=2, label="True values", alpha=0.4)
plt.title("Evolution of filter's coefficients")
figcaption("Evolution of filter's coefficients", label="fig:coeff_ns_case")
Page 246/255
13.4. ADAPTIVE VERSIONS 247
Page 247/255
248 CHAPTER 13. ADAPTIVE FILTERS
is exactly the true gradient algorithm. Thus, we would have exactly the same conditions for convergence
as for the gradient algorithm. However, this is only an approximation. Indeed, in the third line the equality
$\mathbb{E}\left[u(n)u(n)^T w(n)\right] = \mathbb{E}\left[u(n)u(n)^T\right]\mathbb{E}\left[w(n)\right]$ is incorrect, since obviously w(n) depends on u(n) through
the components at times n − 1, n − 2, etc.
Furthermore, it must be stressed that the learning curves are now random. Thus, we can understand that
the convergence conditions are more strict than for the gradient algorithm. A practical rule for the choice of
µ is
$$\mu = \frac{2}{\alpha \operatorname{Tr}\left[R_{uu}\right]} = \frac{2}{\alpha\, p\, R_{uu}(0)},$$
where $\alpha$ is a scalar between 2 and 3, $R_{uu}(0) = \mathbb{E}\left[|u(n)|^2\right]$ and p is the dimension of the correlation matrix.
. . . to be continued. . .
Eweda, E., and Macchi, O.. "Quadratic mean and almost-sure convergence of unbounded stochastic ap-
proximation algorithms with correlated observations." Annales de l’institut Henri Poincaré (B) Probabilités
et Statistiques 19.3 (1983): 235-255. <http://eudml.org/doc/77211>.
@articleEweda1983, author = Eweda, E., Macchi, O., journal = Annales de l’institut Henri Poincaré
(B) Probabilités et Statistiques, keywords = almost-sure convergence; correlated observations; quadratic
mean convergence; stochastic gradient algorithm; finite memory; finite moments, language = eng,
number = 3, pages = 235-255, publisher = Gauthier-Villars, title = Quadratic mean and almost-sure
convergence of unbounded stochastic approximation algorithms with correlated observations, url =
http://eudml.org/doc/77211, volume = 19, year = 1983,
Page 248/255
13.4. ADAPTIVE VERSIONS 249
that is
$$\left(1 - \mu_n\, u(n)^T u(n)\right) \le 1.$$
The optimum value of the step-size corresponds to the minimum of |e(n|n+1)|, which is simply given by
$$\mu_n = \frac{1}{u(n)^T u(n)}.$$
However, the normalized LMS algorithm is often given with an auxiliary factor, say $\tilde{\mu}$, which adds a
tuning parameter to the algorithm:
$$w(n+1) = w(n) - \frac{\tilde{\mu}}{u(n)^T u(n)}\, u(n)\left(w(n)^T u(n) - d(n)\right).$$
Input :
======
d : d e s i r e d sequence at time n
u : input of length p
w : wiener f i l t e r to update
mu : a d a p t a t i o n s t e p f o r t h e NLMS; mu <2
Returns :
=======
w : upated f i l t e r
Page 249/255
250 CHAPTER 13. ADAPTIVE FILTERS
e r r : d−d e s t
d e s t : p r e d i c t i o n = : math : ‘ u ( n ) ^T w‘
"""
a s s e r t mu<2 , " I n t h e n o r m a l i z e d c a s e , mu must be l e s s t h a n 2 "
u= s q u e e z e ( u ) #Remove s i n g l e −d i m e n s i o n a l e n t r i e s from t h e s h a p e o f an
array .
w= s q u e e z e (w)
d e s t =u . d o t (w)
e r r =d−d e s t
mun=mu / ( d o t ( u , u ) +1e −10)
w=w+mun∗u∗ e r r
r e t u r n (w, e r r , d e s t )
$$w(n+1) = w(n) - \mu \frac{d\,\mathbb{E}\left[f(e(n))\right]}{dw(n)} \tag{13.46}$$
$$\phantom{w(n+1)} = w(n) - \mu\, \mathbb{E}\left[u(n)\, \frac{d f(e(n))}{de(n)}\right], \tag{13.47}$$
and the corresponding stochastic gradient algorithm is
$$w(n+1) = w(n) - \mu\, u(n)\, \frac{d f(e(n))}{de(n)}. \tag{13.48}$$
• if f (e) = |e|, then f ′ (e) = sign(e) and we obtain the so-called sign-error algorithm:
This is an early algorithm with very low complexity, which can be implemented without any multipli-
cations (if µ is a power of 2, then the step-size multiplication can be implemented as a bit shift).
• for f (e) = |e|k , then f ′ (e) = k|e|k−1 sign(e), and the stochastic gradient algorithm has the form
See Mathews, ece6550 -chapter4, page 22, for an example of a piecewise linear cost function leading to
a quantization of the error.
Page 250/255
13.4. ADAPTIVE VERSIONS 251
The main problem is the inversion, for each n, of the correlation matrix. Fortunately, it is possible to obtain
a recursive solution which does not need a matrix inversion at all. The key here is to invoke the matrix
inversion lemma
$$[A + BD]^{-1} = A^{-1} - A^{-1} B\,[I + D A^{-1} B]^{-1} D A^{-1}. \tag{13.50}$$
Applying this with $A = \lambda \hat{R}_{uu}(n-1)$, $B = u(n)$ and $D = u(n)^H$, and denoting
$$K_{n+1} = \left[\hat{R}_{uu}(n+1)\right]^{-1},$$
we readily obtain
$$K(n+1) = \frac{1}{\lambda} K(n) - \frac{1}{\lambda^2}\, \frac{K(n)\, u(n+1)\, u(n+1)^H\, K(n)}{1 + \frac{1}{\lambda}\, u(n+1)^H K(n)\, u(n+1)}, \tag{13.51}$$
and after several lines of calculations, we arrive at the updating formula
Note that there are some notational differences between the LMS and the RLS. For the LMS, the filter
w(n + 1) is calculated based on the data available at time n. For the RLS, w(n + 1) is computed using data
available at time (n + 1). This is just a notational difference – we could easily rename w(n + 1) into say v(n)
and obtain similar indexes. However these notations are traditional, so we follow the classical developments
and equations. What is important however is to note that both filters are calculated using the a priori error,
that is the error using the data at time n and the filter computed using the data at time n − 1.
Initialization - The initialization of the algorithm requires the specification of an initial w(0) which is
usually taken as a null vector. It also requires specifying K(0). Since K(0) is the inverse of the correlation
matrix before the beginning of the iterations, we usually choose Ruu (0) = δ I, with δ very small. So the
inverse is K(0) = δ −1 I, a large value which disappears during the iterations of the algorithm.
An implementation of the RLS algorithm is proposed below, using the standard numpy array type
as well as the matrix type. Casting from one type to the other is done by the np.matrix or np.array
keywords (which make a copy), or using the np.asmatrix or np.asarray keywords.
# Implementation using the array type
def algo_rls(u, d, M, plambda):
    """Recursive Least Squares (RLS) identification, array implementation.

    Inputs:
    =======
    u : array
        input sequence of the filter to identify
    d : array
        desired (observed) output sequence
    M : int
        order of the identified FIR filter
    plambda : real
        forgetting factor lambda (0 < lambda <= 1)

    Outputs:
    ========
    wrls : array of shape (M, N+1)
        successive estimates of the impulse response (one column per step)
    e : array
        a priori estimation error at each step
    """
    N = size(u)
    # initialization
    e = zeros(N)
    wrls = zeros((M, N + 1))
    Krls = 100 * eye(M)  # K(0) = delta^{-1} I with delta = 0.01
    u_v = zeros(M)       # regression vector [u(n), u(n-1), ..., u(n-M+1)]
    for n in range(N):
        # Shift the regressor *before* inserting the new sample.
        # The original inserted u[n] first, which duplicated the newest
        # sample at positions 0 and 1 and dropped u(n-1) from the regressor.
        u_v[1:M] = u_v[0:M - 1]
        u_v[0] = u[n]
        # a priori error: desired output minus prediction with current filter
        e[n] = conj(d[n]) - dot(conj(u_v), wrls[:, n])
        # recursive update of K = inverse correlation matrix (eq. 13.51)
        Kn = Krls / plambda
        Krls = Kn - dot(Kn, dot(outer(u_v, conj(u_v)), Kn)) / (
            1 + dot(conj(u_v), dot(Kn, u_v)))
        wrls[:, n + 1] = wrls[:, n] + dot(Krls, u_v) * conj(e[n])
    return (wrls, e)
# ## RLS, matrix version
def col(v):
    """Transform an array into a column vector.

    This is the equivalent of x = x(:) under Matlab.
    """
    flat = asmatrix(v.flatten())
    return flat.reshape((size(flat), 1))
def algo_rls_m(u, d, M, plambda):
    """Recursive Least Squares identification, np.matrix implementation.

    Implementation with the matrix type instead of the array type.

    Inputs:
    =======
    u : array
        input sequence of the filter to identify
    d : array
        desired (observed) output sequence
    M : int
        order of the identified FIR filter
    plambda : real
        forgetting factor lambda (0 < lambda <= 1)

    Outputs:
    ========
    wrls : matrix of shape (M, N+1)
        successive estimates of the impulse response
    e : array
        a priori estimation error at each step
    """
    N = size(u)
    # initialization
    e = zeros(N)
    wrls = matrix(zeros((M, N + 1)))
    Krls = 100 * matrix(eye(M))  # K(0) = delta^{-1} I, delta = 0.01
    # column vector of inputs (equivalent of Matlab's u = u(:))
    u = asmatrix(u).reshape((size(u), 1))
    u_v = matrix(zeros((M, 1)))  # regression vector as a column matrix
    for n in range(N):
        # Shift before inserting the new sample (the original inserted first,
        # duplicating u[n] at positions 0 and 1 of the regressor).
        u_v[1:M] = u_v[0:M - 1]
        u_v[0] = u[n]
        # a priori error; [0, 0] extracts the scalar from the 1x1 matrix
        # (assigning a 1x1 matrix into e[n] is deprecated in modern numpy)
        e[n] = (conj(d[n]) - u_v.H * wrls[:, n])[0, 0]
        Kn = Krls / plambda
        Krls = Kn - Kn * (u_v * u_v.H * Kn) / (1 + u_v.H * Kn * u_v)
        wrls[:, n + 1] = wrls[:, n] + Krls * u_v * conj(e[n])
    return (wrls, e)
At this point, it would be useful to repeat the previous experiments (identification with non-stationary
data) with the RLS algorithm, then to compare and conclude.
def ident_rls(observation, input_data, factor_lambda=0.95, p=20):
    """Identification of an impulse response from an observation
    `observation` of its output, and from its input `input_data`,
    using the RLS algorithm.

    Inputs:
    =======
    observation : array
        output of the filter to identify
    input_data : array
        input of the filter to identify
    factor_lambda : real (default value = 0.95)
        forgetting factor in the RLS algorithm
    p : int (default = 20)
        order of the filter

    Outputs:
    ========
    wrls : array of shape (p, N+1)
        identified impulse response (one column per iteration)
    e : array
        a priori estimation error
    """
    # ensure 1-D inputs (remove singleton dimensions)
    input_data = squeeze(input_data)
    observation = squeeze(observation)
    (wrls, e) = algo_rls(input_data, observation, p, factor_lambda)
    return (wrls, e)
# ## Slow non-stationarity, identified with the RLS algorithm
N = 1000
u = np.random.randn(N)
y = np.zeros(N)
htest = 10 * np.array([1, 0.7, 0.7, 0.7, 0.3, 0])
L = size(htest)
for t in np.arange(L, N):
    modulation = 1 + cos(2 * pi * t / N)  # slowly varying gain
    y[t] = dot(modulation * htest, u[t:t - L:-1])
y += 0.01 * np.random.randn(N)  # observation noise
plt.figure()
plt.plot(y)
_ = plt.title("Observed Signal")

p = 7
lamb = 0.97
# NOTE(review): p=7 is assigned above but p=10 is passed here -- confirm intent
(w, err) = ident_rls(y, u, factor_lambda=lamb, p=10)
plt.figure(1)
clf()
plt.plot(err)
Page 253/255
254 CHAPTER 13. ADAPTIVE FILTERS
References:
• http://www.ece.utah.edu/~mathews/ece6550/chapter10.pdf
• http://www.cs.tut.fi/~tabus/course/ASP/LectureNew10.pdf
Page 254/255
13.4. ADAPTIVE VERSIONS 255
Page 255/255