wavelets/ 40755 1040 1001 0 7321702445 11012 5ustar everyonewavelets/amat_close100644 1040 1001 24011 7244647213 13164 0ustar everyone12/18/1997 28.125 12/19/1997 30 12/22/1997 30.5 12/23/1997 28.625 12/24/1997 28.9375 12/26/1997 29.4375 12/29/1997 29.5625 12/30/1997 30.4375 12/31/1997 30.125 01/02/1998 31 01/05/1998 33.125 01/06/1998 31.25 01/07/1998 29.5 01/08/1998 29.3125 01/09/1998 28 01/12/1998 28.0625 01/13/1998 29.9375 01/14/1998 29.4375 01/15/1998 29.9375 01/16/1998 29.75 01/20/1998 30.75 01/21/1998 32.25 01/22/1998 30.1875 01/23/1998 30.5625 01/26/1998 30.25 01/27/1998 30.8125 01/28/1998 32.75 01/29/1998 31.75 01/30/1998 32.8125 02/02/1998 34.6875 02/03/1998 34.875 02/04/1998 36.875 02/05/1998 35.6875 02/06/1998 36 02/09/1998 34.75 02/10/1998 36 02/11/1998 35.9375 02/12/1998 36.9375 02/13/1998 37.5 02/17/1998 36.375 02/18/1998 36.8125 02/19/1998 37.6875 02/20/1998 36.375 02/23/1998 37.3125 02/24/1998 36 02/25/1998 37.375 02/26/1998 37.875 02/27/1998 36.8125 03/02/1998 35.4375 03/03/1998 35.375 03/04/1998 35.5 03/05/1998 33.1875 03/06/1998 33.5 03/09/1998 31.96875 03/10/1998 33.3125 03/11/1998 33.125 03/12/1998 32.9375 03/13/1998 33.875 03/16/1998 34.8125 03/17/1998 33.3125 03/18/1998 33.125 03/19/1998 33.875 03/20/1998 33.25 03/23/1998 33.5625 03/24/1998 34.125 03/25/1998 35.25 03/26/1998 36.25 03/27/1998 35.375 03/30/1998 35.3125 03/31/1998 35.3125 04/01/1998 37.625 04/02/1998 38.875 04/03/1998 37.5 04/06/1998 36.5625 04/07/1998 35.25 04/08/1998 35.25 04/09/1998 34.75 04/13/1998 34.25 04/14/1998 36.1875 04/15/1998 37.0625 04/16/1998 35.875 04/17/1998 35.4375 04/20/1998 36.6875 04/21/1998 38.125 04/22/1998 38.3125 04/23/1998 37.0625 04/24/1998 36.9375 04/27/1998 35.875 04/28/1998 35.75 04/29/1998 35.875 04/30/1998 36.125 05/01/1998 36.0625 05/04/1998 35.875 05/05/1998 36.4375 05/06/1998 36.3125 05/07/1998 36.125 05/08/1998 36.75 05/11/1998 37.25 05/12/1998 37.9375 05/13/1998 39.5 05/14/1998 38.5625 05/15/1998 37 05/18/1998 37.8125 05/19/1998 37.1875 05/20/1998 35.75 05/21/1998 34.0625 05/22/1998 33.25 05/26/1998 32.8125 05/27/1998 32.3125 05/28/1998 33.75 05/29/1998 32 06/01/1998 29.9375 06/02/1998 30.5625 06/03/1998 29.375 06/04/1998 28.75 06/05/1998 29.625 06/08/1998 30.125 06/09/1998 30.125 06/10/1998 28.75 06/11/1998 28.25 06/12/1998 28.625 06/15/1998 27.1875 06/16/1998 29.125 06/17/1998 27.5 06/18/1998 27.9375 06/19/1998 27.375 06/22/1998 28.5625 06/23/1998 29.375 06/24/1998 30.75 06/25/1998 29.875 06/26/1998 30 06/29/1998 30.3125 06/30/1998 29.5 07/01/1998 30.4375 07/02/1998 29.6875 07/06/1998 30.375 07/07/1998 31.375 07/08/1998 30 07/09/1998 29 07/10/1998 29.5 07/13/1998 28.5625 07/14/1998 28.5 07/15/1998 30.625 07/16/1998 31.1875 07/17/1998 30.8125 07/20/1998 30.625 07/21/1998 31.9375 07/22/1998 31.3125 07/23/1998 30.5625 07/24/1998 30.375 07/27/1998 31.0625 07/28/1998 32.8125 07/29/1998 32 07/30/1998 33 07/31/1998 33.5 08/03/1998 33.8125 08/04/1998 32.5 08/05/1998 34.0625 08/06/1998 35.125 08/07/1998 35.125 08/10/1998 33.6875 08/11/1998 32.875 08/12/1998 32.125 08/13/1998 31.375 08/14/1998 31.125 08/17/1998 33.3125 08/18/1998 34.0625 08/19/1998 32.6875 08/20/1998 31.25 08/21/1998 30.5 08/24/1998 30.6875 08/25/1998 31.4375 08/26/1998 29.5625 08/27/1998 27.3125 08/28/1998 26.625 08/31/1998 24.5625 09/01/1998 26.125 09/02/1998 24.875 09/03/1998 24 09/04/1998 23.75 09/08/1998 25.5625 09/09/1998 24.125 09/10/1998 24.1875 09/11/1998 24.9375 09/14/1998 24.25 09/15/1998 23.6875 09/16/1998 24.0625 09/17/1998 24 09/18/1998 24.5 09/21/1998 24.75 09/22/1998 24.9375 09/23/1998 25.6875 
09/24/1998 25.5 09/25/1998 26.875 09/28/1998 26.5625 09/29/1998 26.75 09/30/1998 25.25 10/01/1998 23.75 10/02/1998 23.9375 10/05/1998 23.125 10/06/1998 23.5625 10/07/1998 23 10/08/1998 22.375 10/09/1998 23.625 10/12/1998 26.8125 10/13/1998 26.4375 10/14/1998 27.3125 10/15/1998 28.8125 10/16/1998 29.3125 10/19/1998 30.5625 10/20/1998 29.3125 10/21/1998 30.3125 10/22/1998 32.875 10/23/1998 33.9375 10/26/1998 33.3125 10/27/1998 31.875 10/28/1998 34.1875 10/29/1998 35.4375 10/30/1998 34.6875 11/02/1998 33.4375 11/03/1998 32.75 11/04/1998 35.25 11/05/1998 34.875 11/06/1998 36.9375 11/09/1998 36.1875 11/10/1998 35.625 11/11/1998 37.5625 11/12/1998 38.75 11/13/1998 38.25 11/16/1998 36.3125 11/17/1998 37.375 11/18/1998 36.9375 11/19/1998 38 11/20/1998 39.3125 11/23/1998 40.0625 11/24/1998 42.3125 11/25/1998 40.6875 11/27/1998 41.375 11/30/1998 38.75 12/01/1998 39.4375 12/02/1998 43.8125 12/03/1998 42.8125 12/04/1998 44.1875 12/07/1998 44.25 12/08/1998 45.4375 12/09/1998 43.4375 12/10/1998 41.1875 12/11/1998 41.9375 12/14/1998 39.125 12/15/1998 40.625 12/16/1998 39.5625 12/17/1998 41.125 12/18/1998 45.4375 12/21/1998 44.6875 12/22/1998 43.4375 12/23/1998 44.6875 12/24/1998 43.9375 12/28/1998 43.6875 12/29/1998 43.5 12/30/1998 41.875 12/31/1998 42.6875 01/04/1999 44.3125 01/05/1999 49.75 01/06/1999 53.5625 01/07/1999 54.625 01/08/1999 55.5 01/11/1999 55.5 01/12/1999 53.4375 01/13/1999 54.875 01/14/1999 53.25 01/15/1999 56.1875 01/19/1999 54.875 01/20/1999 57.5625 01/21/1999 55.3125 01/22/1999 54.625 01/25/1999 54 01/26/1999 58.5625 01/27/1999 56.1875 01/28/1999 59 01/29/1999 63.1875 02/01/1999 62.1875 02/02/1999 59.1875 02/03/1999 63.375 02/04/1999 61.75 02/05/1999 60.6875 02/08/1999 67.4375 02/09/1999 63.25 02/10/1999 60.8125 02/11/1999 67.0625 02/12/1999 66.875 02/16/1999 67.875 02/17/1999 67.375 02/18/1999 66.25 02/19/1999 68.6875 02/22/1999 68.1875 02/23/1999 68.4375 02/24/1999 68.25 02/25/1999 63.75 02/26/1999 55.625 03/01/1999 57.0625 03/02/1999 57.3125 03/03/1999 58.25 03/04/1999 57.5625 03/05/1999 61.6875 03/08/1999 63.25 03/09/1999 58.875 03/10/1999 61 03/11/1999 61.625 03/12/1999 60.875 03/15/1999 60.5625 03/16/1999 63.625 03/17/1999 63.9375 03/18/1999 63.875 03/19/1999 60.9375 03/22/1999 60.0625 03/23/1999 58.6875 03/24/1999 57.375 03/25/1999 61.0625 03/26/1999 57.8125 03/29/1999 61.375 03/30/1999 62.5 03/31/1999 61.6875 04/01/1999 64.375 04/05/1999 65.75 04/06/1999 67.9375 04/07/1999 64.75 04/08/1999 65.125 04/09/1999 66.5 04/12/1999 62.6875 04/13/1999 59.8125 04/14/1999 59 04/15/1999 62 04/16/1999 61.375 04/19/1999 58.6875 04/20/1999 56.75 04/21/1999 61.375 04/22/1999 61.8125 04/23/1999 60.75 04/26/1999 60.75 04/27/1999 58.9375 04/28/1999 54.1875 04/29/1999 51.875 04/30/1999 53.625 05/03/1999 55.25 05/04/1999 53.0625 05/05/1999 57.4375 05/06/1999 53.9375 05/07/1999 56.75 05/10/1999 56.1875 05/11/1999 56.6875 05/12/1999 60.3125 05/13/1999 59.03125 05/14/1999 60.6875 05/17/1999 63.25 05/18/1999 63.125 05/19/1999 65.3125 05/20/1999 61.5625 05/21/1999 59.5625 05/24/1999 56.25 05/25/1999 55.3125 05/26/1999 54.5 05/27/1999 55.25 05/28/1999 55 06/01/1999 54.0625 06/02/1999 57.625 06/03/1999 57.25 06/04/1999 60.8125 06/07/1999 61.8125 06/08/1999 61.8125 06/09/1999 64.8125 06/10/1999 66 06/11/1999 65.8125 06/14/1999 66 06/15/1999 67.375 06/16/1999 70.1875 06/17/1999 69.1875 06/18/1999 66.625 06/21/1999 70 06/22/1999 68.6875 06/23/1999 69.375 06/24/1999 65.8125 06/25/1999 64.8125 06/28/1999 66.375 06/29/1999 69.125 06/30/1999 73.875 07/01/1999 72.375 07/02/1999 72.125 07/06/1999 74.5 07/07/1999 
70.5 07/08/1999 72.9375 07/09/1999 74.125 07/12/1999 73 07/13/1999 72.5 07/14/1999 77.5 07/15/1999 76.375 07/16/1999 78.5625 07/19/1999 77.5625 07/20/1999 72.3125 07/21/1999 72.6875 07/22/1999 68.25 07/23/1999 71 07/26/1999 69.375 07/27/1999 72.5625 07/28/1999 75.1875 07/29/1999 71.8125 07/30/1999 71.9375 08/02/1999 71.875 08/03/1999 73.75 08/04/1999 72.875 08/05/1999 72.4375 08/06/1999 74.6875 08/09/1999 77.875 08/10/1999 76.9375 08/11/1999 77.75 08/12/1999 72.625 08/13/1999 69.75 08/16/1999 70.5 08/17/1999 71.75 08/18/1999 65.0625 08/19/1999 65.3125 08/20/1999 63.9375 08/23/1999 66.875 08/24/1999 69.75 08/25/1999 68.1875 08/26/1999 67.6875 08/27/1999 68.9375 08/30/1999 70 08/31/1999 71.0625 09/01/1999 74 09/02/1999 74.4375 09/03/1999 77.375 09/07/1999 77.1875 09/08/1999 76.625 09/09/1999 79 09/10/1999 78.75 09/13/1999 76 09/14/1999 81.5625 09/15/1999 78.125 09/16/1999 81.4375 09/17/1999 84.8125 09/20/1999 83.8125 09/21/1999 81.3125 09/22/1999 83.6875 09/23/1999 78.875 09/24/1999 78.875 09/27/1999 82.375 09/28/1999 80.6875 09/29/1999 79.75 09/30/1999 77.6875 10/01/1999 78.1875 10/04/1999 82.0625 10/05/1999 85.5625 10/06/1999 86.75 10/07/1999 82.4375 10/08/1999 82.25 10/11/1999 82.75 10/12/1999 81.25 10/13/1999 79.5625 10/14/1999 80.28125 10/15/1999 79.875 10/18/1999 77.75 10/19/1999 74.75 10/20/1999 78.5 10/21/1999 79.1875 10/22/1999 78.8125 10/25/1999 80.3125 10/26/1999 80.125 10/27/1999 79.3125 10/28/1999 83.75 10/29/1999 89.8125 11/01/1999 87.75 11/02/1999 91.125 11/03/1999 94.4375 11/04/1999 92.75 11/05/1999 98 11/08/1999 97.1875 11/09/1999 99.4375 11/10/1999 101.75 11/11/1999 108.5 11/12/1999 109 11/15/1999 105.25 11/16/1999 105.5 11/17/1999 110 11/18/1999 107 11/19/1999 107.25 11/22/1999 103.3125 11/23/1999 102.875 11/24/1999 102.4375 11/26/1999 102 11/29/1999 101.3125 11/30/1999 97.4375 12/01/1999 100.5 12/02/1999 107.75 12/03/1999 110.25 12/06/1999 114.3125 12/07/1999 111.25 12/08/1999 114.8125 12/09/1999 112.6875 12/10/1999 109.4375 12/13/1999 108.0625 12/14/1999 104.5625 12/15/1999 103.25 12/16/1999 110.5625 12/17/1999 110.75 12/20/1999 116.3125 12/21/1999 123.625 12/22/1999 120.9375 12/23/1999 121.625 12/27/1999 127.6875 12/28/1999 126.0625 12/29/1999 126.375 12/30/1999 124.375 12/31/1999 126.6875 wavelets/clean_script100644 1040 1001 250 7303100635 13446 0ustar everyone find . -name "*.html" -exec rm {} \; find . -name "*.class" -exec rm {} \; find . -name "*.css" -exec rm {} \; find . -name "*~" -exec rm {} \; rm -f package-listwavelets/curve_plot_test.java100644 1040 1001 6106 7317423127 15200 0ustar everyone import java.io.*; import wavelets.*; import wavelet_util.*; import dataInput.*; /**

Generate histograms for the Haar coefficients created by applying the Haar transform to the time series for the Applied Materials (symbol: AMAT) daily close price. Plot the histograms along with a normal curve with a mean and standard deviation calculated from the coefficients.

There are 512 data points in the AMAT daily close price time series. This program generates a histogram for the first three high frequency sets of coefficients (i.e., 256, 128, and 64 coefficients).

Financial theory states that the average daily return (i.e., the difference between today's close price and yesterday's close price) is normally distributed. So the histogram of the highest frequency coefficients, which reflect the difference between two close prices, should be bell-curve shaped and centered around zero.

The close price in the AMAT time series rises sharply about halfway through. So as the coefficient frequency decreases, the histogram will be shifted farther and farther away from zero.

Note that an inplace Haar transform is used that replaces the values with the coefficients. The order function orders the coefficients from the butterfly pattern generated by the inplace algorithm into increasing frequencies, where the lowest frequency is at the beginning of the array. Each frequency is a power of two: 2, 4, 8, 16, 32, 64, 128, 256.

*/ class curve_plot_test { /**

Write out the log of the close price. Daily return is usually expressed as the log of today's close minus the log of yesterday's close. This function allows a plot of the log close price to be compared to the close price.
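In symbols (a standard definition, not something this class computes), the daily log return for day t is

       r(t) = log( close(t) ) - log( close(t-1) )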

*/ public class plot_log extends plot { String class_name() { return "plot_log"; } public plot_log( double v[] ) { String file_name = "log_coef"; PrintWriter prStr = OpenFile( file_name ); if (prStr != null) { for (int i = 0; i < v.length; i++) { prStr.println(i + " " + v[i] ); } prStr.close(); } } } // class plot_log public void plot_log( double v[] ) { plot_log t = new plot_log( v ); } public static void main( String[] args ) { String timeSeriesFile = "amat_close"; // Applied Materials Close prices tsRead data = new tsRead( timeSeriesFile ); // // The wavelet algorithms work on arrays whose length is a power // of two. Set the length to the nearest power of two that is // less than or equal to the data length. // int len = data.getSize(); if (len > 0) { int newSize = binary.nearestPower2( len ); data.setSize( newSize ); double[] vals = data.getArray(); if (vals != null) { wavelets.inplace_haar haar = new wavelets.inplace_haar(); haar.wavelet_calc( vals ); haar.order(); bell_curves curves = new bell_curves(); curves.plot_curves( vals ); } } } // main } wavelets/dataInput/ 40755 1040 1001 0 7321417414 12742 5ustar everyonewavelets/dataInput/tsRead.java100644 1040 1001 25702 7310515174 15152 0ustar everyone package dataInput; import java.io.*; import double_vec.*; /**

class tsRead

Read a time series file. The format of the file is

  date  double-value

This class was written with daily financial time series in mind. It needs to be modified to handle intra-day time series, or any other time series that has a time stamp as well as a date.

The class is used by instantiating the constructor with the file name for the time series. For example:

  tsRead ts = new tsRead( time_series );

The tsRead class reads the time series into a double_vec object.

Once data is read into the object (that is, once the object is constructed) the data is referenced by getting it as an array (double[]) via the getArray function.

Getting the data as an array is inefficient, since the data must be copied into the array first. This course is followed for two reasons:

  1. The size of the input data is not known in advance, so a growable data structure, like double_vec, is used.

  2. Although the double_vec data structure works well for storing the input data, it is awkward when it comes to numeric computation, since the only way to reference elements is via the elementAt() function. Compared to the [] operator, this tends to obscure numeric algorithms, making the code more difficult to understand. This is a real drawback for the wavelet algorithms, which are already complex enough.

So the input data is referenced via an array, even though we must pay the cost of copying. Although operator overloading can create its own problems, this is a case where operator overloading would be useful, since in C++ double_vec could have been used directly.
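A typical usage sketch, mirroring the test programs in this directory (the file name is just an example):

  tsRead data = new tsRead( "amat_close" );
  int len = data.getSize();
  if (len > 0) {
      // the wavelet code expects a power of two length
      data.setSize( binary.nearestPower2( len ) );
      double[] vals = data.getArray();   // copies the values into an array
      // ... numeric processing on vals ...
  }

The binary.nearestPower2 helper comes from the wavelet_util package used by the test programs.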

Copyright and Use

You may use this source code without limitation and without fee as long as you include:

This software was written and is copyrighted by Ian Kaplan, Bear Products International, www.bearcave.com, 2001.

This software is provided "as is", without any warranty or claim as to its usefulness. Anyone who uses this source code uses it at their own risk. Nor is any support provided by Ian Kaplan and Bear Products International.

Please send any bug fixes or suggested source changes to:

     iank@bearcave.com
*/ public class tsRead { /** class badData : an exception for bad data in the file. */ private class badDataError extends Exception { public badDataError() { super(); } public badDataError( String m ) { super(m); } } // badData PushbackInputStream pushStream; private double_vec timeSeries; private boolean fileOpen = false; /** skip_spaces Skip white space characters. As with all the code in this class, it is assumed that characters are 8-bits. Return true if EOF is reached. Otherwise return false. The function will read until a non-white character or end of file is reached. The non-white space character is pushed back into the input stream. */ private boolean skip_spaces() { int ch = 0; try { while (Character.isWhitespace( (char)(ch = pushStream.read()) )) /* nada */; if (ch > 0) { try { pushStream.unread( ch ); } catch (Exception e) { System.out.println("skip_spaces: push back error - " + e.getMessage() ); } } } catch (Exception e) { System.out.println("skip_spaces: read error - " + e.getMessage() ); } return (ch == 0); } // skip_spaces /** *

read_date

Currently dates are skipped. The time series code assumes that dates are continuous and there are no holes in the data.

For consistency's sake some checking is done. The code assumes that dates are composed of numbers or '/' characters.

When this function is called it is assumed that the first character in the stream is a number. The function reads numbers or '/' characters until the end of line is reached or there is a space.

The format for a date is

       (number)+ '/' (number)+ '/' (number)+
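For illustration only (the parser below scans characters directly rather than using regular expressions), a token in this format could be checked with java.util.regex, which is available in Java versions later than this code targets, assuming a hypothetical String token:

       boolean looksLikeDate = token.matches( "[0-9]+/[0-9]+/[0-9]+" );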
*/ private boolean read_date() throws badDataError { int ch = 0; boolean foundDigit = false; boolean rslt = false; badDataError err = new badDataError( "Bad date format" ); try { for (int i = 0; i < 3; i++) { foundDigit = false; while (Character.isDigit( (char)(ch = pushStream.read()) )) { foundDigit = true; } if (ch < 0) { rslt = true; // we've reached EOF break; } if (! foundDigit ) throw err; if (i < 2) if ((char)ch != '/') throw err; } // for } catch (Exception e) { System.out.println("read_date: read error - " + e.getMessage() ); } if (foundDigit) { // push the character after the date back into the // input stream. try { pushStream.unread( ch ); } catch (Exception e) { System.out.println("read_date: push back error - " + e.getMessage() ); } } return rslt; } // read_date /** read_double

Read the character string for a floating point value and convert it into a double. The format for a double is

       [+|-] (digit)+ '.' (digit)+
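For comparison (this is not how the parser below works; it assembles the string a character at a time and then converts it), a token in this format could be converted directly, for a hypothetical String token:

       double value = Double.parseDouble( token );   // e.g. "28.125" becomes 28.125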

The function is passed a reference to an array of doubles. It returns dv[0] with the value (Java has not pass by reference except via objects). If the function reaches EOF, it return false, otherwise true. */ private boolean read_double( double[] dv ) throws badDataError { final int DBL_SIGN = 1; // + | - final int DBL_INT = 2; // the part that is > 1 => (digit)+ final int DBL_FRAQ = 3; // fraction after '.' => (digit)+ final int DBL_DONE = 4; // finished parsing double int state = DBL_SIGN; String s = new String(); boolean rslt = false; char ch = 0; dv[0] = 0.0; try { while (state != DBL_DONE) { ch = (char)(pushStream.read()); if (ch > 0xff) { // EOF, since we're not supporting wide chars rslt = true; state = DBL_DONE; } switch (state) { case DBL_SIGN: if (ch == '+' || ch == '-' || ch == '.' || Character.isDigit( ch )) { s = s + ch; if (ch == '.') state = DBL_FRAQ; else state = DBL_INT; } else { badDataError err = new badDataError(); throw err; } break; case DBL_INT: if (Character.isDigit( ch ) || ch == '.') { s = s + ch; if (ch == '.') state = DBL_FRAQ; } else { state = DBL_DONE; } break; case DBL_FRAQ: if (Character.isDigit( ch )) { s = s + ch; } else { state = DBL_DONE; } break; } // switch } // while } // try catch (Exception e) { System.out.println("read_double: error reading file - " + e.getMessage() ); } // push the last character back into the input stream try { pushStream.unread( ch ); } catch (Exception e) { System.out.println("read_double: push back error - " + e.getMessage() ); } if (s.length() > 0) { // convert string to double Double dblObj = new Double( s ); dv[0] = dblObj.doubleValue(); } return rslt; } // read_double /**

time_series_read

Read a time series file into a double_vec object */ private void time_series_read() { timeSeries = new double_vec(); boolean end_of_file = false; double dv[] = new double[1]; try { while (! end_of_file ) { end_of_file = skip_spaces(); if (! end_of_file) { end_of_file = read_date(); if (! end_of_file) { end_of_file = skip_spaces(); if (! end_of_file) { end_of_file = read_double( dv ); if (! end_of_file) { timeSeries.append( dv[0] ); } // read_double } // skip_spaces } // read_date } // skip_spaces } // while } catch (Exception e) { if (e instanceof badDataError) { System.out.println("bad data: " + e.getMessage() ); } } } // time_series_read private boolean openStream( String name ) { boolean rslt = false; try { FileInputStream fStream; fStream = new FileInputStream( name ); // Allocating the FileInputStream opens the file. // If the open fails, either because access is not // allowed or because it does not exist, // an exception will be thrown. If this happens we // will not proceed to the next line. pushStream = new PushbackInputStream( fStream ); rslt = true; } catch (Exception e ) { if (e instanceof FileNotFoundException) { System.out.println("could not open file " + name ); } else if (e instanceof SecurityException) { System.out.println("not allowed to open file " + name ); } else { System.out.println(e.toString() + "unexpected exception" ); } } return rslt; } // openStream /** setSize

Set the size of the double_vec. This function is useful if the size needs to be set to the nearest power of two less than or equal to the data size before getting the data as an array. The wavelet algorithms do no work on arrays whose length is not a power of two. */ public void setSize( int size ) { if (timeSeries != null) { timeSeries.set_size( size ); } } // setSize /** getSize Return the size of the double_vec. */ public int getSize() { int size = 0; if (timeSeries != null) { size = timeSeries.length(); } return size; } // getSize /** getArray

Return the data in a double[] object. */ public double[] getArray() { double[] dblArray = null; if (timeSeries != null) { int len = timeSeries.length(); if (len > 0) { dblArray = new double[len]; for (int i = 0; i < len; i++) { dblArray[i] = timeSeries.elementAt( i ); } // for } } return dblArray; } // getArray /**

tsRead constructor

The tsRead constructor is passed a file (or file name path) name. If the open succeeds the time series will be read into the object. The time series is read into a double_vec object which is returned by the getDoubleVec() function. */ public tsRead( String file_name ) { if (openStream( file_name )) { time_series_read(); } } // tsRead } // tsRead wavelets/double_vec/ 40755 1040 1001 0 7321417414 13120 5ustar everyonewavelets/double_vec/double_vec.java100644 1040 1001 13666 7276407355 16237 0ustar everyone package double_vec; /** *

class double_vec

The double_vec class is based on a C++ template (sadly, Java does not yet have generics). This class (and the template it is based on) is similar in design to the containers in the C++ Standard Template Library. The double_vec class supports a growable array to which elements can be continuously added.

The double_vec class is designed for dense arrays where all elements of the array are used (i.e., there are no empty elements).

Implementation:

A doubling algorithm is used when the data size is expanded because it minimizes the amount of copying that must be done. The array will quickly grow to the size needed to accommodate the data set and no more copying will be necessary. For large arrays this has the drawback that more memory may be allocated than is needed, since the amount of memory used grows exponentially.

The template on which the double_vec class was based was used to implement a String container class. As a result, this class may be overkill as a double container.

Usage

Using the elementAt() and setElementAt() functions can obscure the clarity of numeric code. One way around this is to use the getData() function to return the array and the length() function to return the amount of data in the array. (Note that the array will usually be larger than the number of data elements).
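A minimal usage sketch (the values are hypothetical):

  double_vec v = new double_vec();
  v.append( 1.5 );
  v.append( 2.25 );
  int n = v.length();               // number of valid elements (2)
  double first = v.elementAt( 0 );  // bounds-checked access
  double[] raw = v.getData();       // backing array; usually longer than n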

Copyright and Use

You may use this source code without limitation and without fee as long as you include:

This software was written and is copyrighted by Ian Kaplan, Bear Products International, www.bearcave.com, 2001.

This software is provided "as is", without any warranty or claim as to its usefulness. Anyone who uses this source code uses it at their own risk. Nor is any support provided by Ian Kaplan and Bear Products International.

Please send any bug fixes or suggested source changes to:

     iank@bearcave.com
*/ public class double_vec { private final static int StartArraySize = 128; private int num_elem; // number of data elements currently in the array private int array_size; // array size (e.g., data capacity) double[] array; /** *

Double the amount of memory allocated for the array. Return true if memory allocation succeeded, false otherwise. */ private void twice() { double[] old_array = array; int new_size = array_size * 2; // if "new" fails it will throw an OutOfMemoryError exception array = new double[ new_size ]; for (int i = 0; i < array_size; i++) { array[i] = old_array[i]; } array_size = new_size; } // twice /** * Allocate an array whose initial size is StartArraySize */ public double_vec() { array = new double[ StartArraySize ]; num_elem = 0; array_size = StartArraySize; } // double_vec constructor /** * Return the number of elements currently in the array */ public int length() { return num_elem; } /** * Assign array element ix the value val. */ public void setElementAt( double val, int ix ) throws IndexOutOfBoundsException { if (ix >= num_elem) { String msg = "double_vec: setElementAt(" + ix + ") out of bounds"; throw new IndexOutOfBoundsException(msg); } array[ix] = val; } /** * Return the array element at index ix. If ix is out of range an IndexOutOfBoundsException will be thrown. */ public double elementAt( int ix ) throws IndexOutOfBoundsException { if (ix >= num_elem) { String msg = "double_vec: elementAt(" + ix + ") out of bounds"; throw new IndexOutOfBoundsException(msg); } return array[ix]; } /** * Return a reference to the double_vec array. */ public double[] getData() { return array; } /** * Append an item to the end of the array */ public void append( double val ) { if (num_elem == array_size) { twice(); } array[ num_elem ] = val; num_elem++; } // append /** *

Expand the number of array data slots by "amount" elements. Note that "array_size" is the total amount of storage available for data slots. "num_elem" is the number of data slots occupied by data. The bounds over which the array can be indexed is governed by num_elem. Note that after expand() is called the new data elements can be read, but their value is undefined until they are initialized. */ public void expand( int amount ) { while (num_elem + amount >= array_size) { twice(); } num_elem += amount; } // expand /** * Remove one item from the end of the array. */ public void remove() { if (num_elem > 0) num_elem--; } // remove /** *

Set the number of data elements in the array to a new value (note that this will usually be smaller than the array size, unless a power of two is chosen for "new_size"). */ public void set_size( int new_size ) { if (new_size <= array_size) { num_elem = new_size; } else { // new_size > array_size int num_new_elem = new_size - num_elem; expand( num_new_elem ); } } // set_size } // double_vec wavelets/experimental/ 40755 1040 1001 0 7317772116 13516 5ustar everyonewavelets/experimental/histo.java100644 1040 1001 10641 7316771656 15635 0ustar everyone package experimental; import java.io.*; import java.lang.Math.*; import sort.*; /**

Support for generating histograms for the Haar wavelet coefficients.

*/ public class histo extends plot { public class mean_median { public double mean; // mean (a.k.a. average public double median; // middle of the sorted number sequence public double low; // low value public double high; // high value } public class bin { public bin() {}; public double start; // x-axis public double percent; // y-axis } // bin public bin alloc_bin() { bin b = new bin(); return b; } /** Generate a histogram array from the input vector. The data in the input vector is modified.

The values to be histogrammed are sorted to obtain a low and a high value.

The histogram is calculated by dividing the number range into a set of bins. Each bin has a start value; the end value is the start of the next bin. The sorted input array is traversed and the number of values that fall in each bin range is counted. Each bin is then set to that count as a fraction of the total number of values. The histogram is graphed with the bin range on the x-axis and the bin fraction on the y-axis.

The histogram has zero-valued bins at the bottom and top of the range to improve the appearance of the histogram when plotted with gnuplot.

The function also returns the mean (a.k.a. average) and the median in the argument m. The median is the middle of a sorted list of numbers. In the case of the wavelet algorithm, all arrays have a power of two length, so the middle value is calculated from the two values on either side of the middle. For example, if the vector has 32 numbers, the median is calculated from the average of v[15] and v[16].
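In code form, the median calculation described above amounts to the following (for a sorted array v of even length):

       int half = v.length / 2;
       double median = (v[half - 1] + v[half]) / 2.0;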

*/ private bin[] calcHisto( double v[], mean_median m ) { bin histoBins[] = null; m.mean = 0.0; m.median = 0.0; if (v != null && v.length > 0) { final int numSteps = 21; qsort.sort( v ); double step = (v[v.length-1] - v[0]) / numSteps; double start = v[0] - step; histoBins = new bin[ numSteps + 3 ]; for (int i = 0; i < histoBins.length; i++) { bin b = new bin(); b.percent = 0.0; b.start = start; start = start + step; histoBins[i] = b; } int binIx = 0; int i = 0; start = v[0] - step; double end = v[0]; histoBins[binIx].start = start; histoBins[binIx].percent = 0.0; double sum = 0.0; int count = 0; double total = 0; while (i < v.length && binIx < histoBins.length) { if (v[i] >= start && v[i] < end) { sum = sum + v[i]; count++; i++; } else { histoBins[binIx].percent = (double)count / v.length; total = total + histoBins[binIx].percent; count = 0; binIx++; if (binIx < histoBins.length) { start = end; end = start + step; } } } // while System.out.println("Total = " + total ); System.out.println("sum = " + sum ); if (m != null) { int half = v.length >> 1; // half = length / 2; m.mean = sum / v.length; m.median = (v[half-1] + v[half])/2; m.low = v[0]; m.high = v[v.length-1]; } } // if return histoBins; } // calcHisto /**

histogram

The histogram function is passed an array of doubles and a String that contains the path for the output file name. The function writes out a file that contains histogram plot data formatted for gnuplot. The input array vals is sorted in place as a side effect.

The function returns the mean (a.k.a. average) and the median in a mean_median object.
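A usage sketch (coef is a hypothetical array of Haar coefficients and the output file name is made up):

  histo graph = new histo();
  histo.mean_median m = graph.histogram( coef, "coef_histo" );
  // m.mean, m.median, m.low and m.high summarize the coefficient set;
  // the file coef_histo contains the gnuplot formatted bin data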

*/ public mean_median histogram( double vals[], String path ) { mean_median m = null; if (vals != null && vals.length > 0) { PrintWriter prStr = OpenFile( path ); if (prStr != null) { m = new mean_median(); bin histoBins[] = calcHisto( vals, m ); prStr.println("#"); prStr.println("# mean = " + m.mean); prStr.println("# median = " + m.median ); prStr.println("#"); for (int i = 0; i < histoBins.length; i++) { prStr.println(" " + histoBins[i].start + " " + histoBins[i].percent ); } prStr.close(); } } return m; } // histogram } // histo wavelets/experimental/plot.java100644 1040 1001 662 7314534507 15415 0ustar everyone package experimental; import java.io.*; class plot { PrintWriter OpenFile( String path ) { PrintWriter prStr = null; try { FileOutputStream outStr = new FileOutputStream( path ); prStr = new PrintWriter( outStr ); } catch (Exception e) { System.out.println("gnuplot3D: file name = " + path + ", " + e.getMessage() ); } return prStr; } // OpenFile } // plot wavelets/experimental/statistics.java100644 1040 1001 17447 7317757765 16722 0ustar everyone package experimental; import java.lang.Math.*; /**

Normal curve and normal curve graphing functions

This class supports the following public functions: stddev, normal_curve and integrate_curve.

There are also two public nested classes: bell_info and point.

This code is largely experimental and was written to understand how a graph of sorted Haar coefficient values relates to a normal curve with the same mean and standard deviation.

*/ public class statistics { /** Bell curve info: mean, sigma (the standard deviation), low (the start of the curve area) and high (the end of the curve area). */ public class bell_info { public bell_info() {}; public bell_info(double m, double s, double l, double h) { mean = m; sigma = s; low = l; high = h; } public double mean; public double sigma; public double low; public double high; } // bell_info /** Allocate a bell_info object and initialize it with the arguments mean, sigma, low and high. */ private bell_info new_info(double mean, double sigma, double low, double high) { bell_info info = new bell_info(mean, sigma, low, high); return info; } // new_info /**

Calculate the mean and standard deviation. Also note the low and high values of the number range.

The stddev function is passed an array of numbers. It returns the statistics for a normal curve based on those numbers: mean, standard deviation, low value and high value.
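In symbols, for N values v[0] .. v[N-1] the function computes the sample statistics

       mean  = (v[0] + v[1] + ... + v[N-1]) / N
       sigma = sqrt( sum( (v[i] - mean)^2 ) / (N - 1) )

Note that the variance sum is divided by N - 1 (the sample standard deviation), as in the code below.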

*/ public static bell_info stddev( double v[] ) { bell_info stats = null; if (v != null && v.length > 0) { int N = v.length; double low = v[0]; double high = v[0]; // calculate the mean (a.k.a average), low and high double sum = 0.0; for (int i = 0; i < N; i++) { sum = sum + v[i]; if (v[i] < low) low = v[i]; if (v[i] > high) high = v[i]; } System.out.println("sum = " + sum ); double mean = sum / (double)N; // calculate the standard deviation sum double stdDevSum = 0; double x; for (int i = 0; i < N; i++) { x = v[i] - mean; stdDevSum = stdDevSum + (x * x); } double sigmaSquared = stdDevSum / (N-1); double sigma = Math.sqrt( sigmaSquared ); statistics s = new statistics(); stats = s.new_info(mean, sigma, low, high); } return stats; } // stddev /** A point on a graph */ public class point { public double x, y; } // point /** Allocate an array of point objects. The size allocated is given by the argument size. */ private point[] point_array( int size ) { point a[] = new point[ size ]; for (int i = 0; i < size; i++) { a[i] = new point(); } return a; } // point array /**

Print the histogram bins to standard output.

*/ private void print_bins( histo.bin histoBins[] ) { if (histoBins != null) { for (int i = 0; i < histoBins.length; i++) { System.out.println( histoBins[i].start + " " + histoBins[i].percent ); } } } // print_bins /**

The amount of Gaussian noise in the Haar wavelet coefficients can be seen by graphing a histogram of the coefficients along with a Gaussian (normal) curve. For this graph to be meaningful the two plots must be expressed on the same scale.

The statistics.normal_curve function generates a normal bell curve around the mean, where the x-axis is the number range over which the curve is plotted and the y-axis is the probability of a point appearing at the associated place on the x-axis.

In contrast, the y-axis for the histogram calculated for the Haar coefficients reflects the fraction of the total points that fall in that particular histogram bin range.

The histogram of the Haar coefficients is plotted by calculating the fraction of the total for each histogram bin. This means that all the Haar histogram bins sum to one. To plot the normal curve in the same units as the histogram plot of the Haar coefficients, the normal curve is integrated over each histogram bin. The area under the normal curve is also one. The output is written to standard output.
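In outline, the probability mass assigned to a histogram bin [start, end) is the rectangle-rule approximation of the integral of the curve over that bin:

       bin.percent = sum of curve[i].y * (curve[i+1].x - curve[i].x)
                     over the curve points with start <= curve[i].x < end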

Summary:

Input argument curve:

    x-axis: number range
    y-axis: probability fraction
    
*/ public void integrate_curve( point curve[] ) { if (curve != null) { final int num_bins = 24; int len = curve.length; double low = curve[0].x; double high = curve[len-1].x; double range = high - low; double step = range / num_bins; double start = low; int i; histo.bin histoBins[] = new histo.bin[ num_bins ]; for (i = 0; i < num_bins; i++) { histo t = new histo(); histoBins[i] = t.alloc_bin(); histoBins[i].start = start; histoBins[i].percent = 0; start = start + step; } start = low; double end = low + step; i = 0; int ix = 0; double delta; double area = 0.0; double sum = 0.0; while (i < curve.length-1) { if (curve[i].x >= start && curve[i].x < end) { delta = curve[i+1].x - curve[i].x; area = curve[i].y * delta; histoBins[ix].percent = histoBins[ix].percent + area; sum = sum + area; i++; } else { start = end; end = end + step; ix++; } } // while System.out.println("integrate_curve: area under curve = " + sum); print_bins( histoBins ); } } // integrate_curve /** Calculate the information for a graph (composed of point objects) based on the bell_info argument. The graph will have a number of point objects equal to num_points. The equation to calculate a normal curve is used:
       f(y) = (1 / (s * sqrt(2 * pi))) * e^( -(y - u)^2 / (2 * s^2) )

Where u is the mean and s is the standard deviation.

*/ public static point[] normal_curve( bell_info info, int num_points ) { point[] graph = null; if (info != null) { statistics stat = new statistics(); graph = stat.point_array( num_points ); double s = info.sigma; // calculate 1/(s * sqrt(2 * pi)), where s is the stddev double sigmaSqrt = 1.0 / (s * (Math.sqrt(2 * Math.PI))); double oneOverTwoSigmaSqrd = 1.0 / (2 * s * s); double range = info.high - info.low; double step = range / num_points; double y = info.low; double f_of_y; double t; for (int i = 0; i < num_points; i++) { graph[i].x = y; t = y - info.mean; f_of_y = sigmaSqrt * Math.exp( -(oneOverTwoSigmaSqrd * t * t) ); graph[i].y = f_of_y; y = y + step; } } return graph; } // normal_curve } // statistics wavelets/filter_test.java100644 1040 1001 2223 7321424150 14267 0ustar everyone import java.io.*; import wavelets.*; import wavelet_util.*; import dataInput.*; /** Apply the gaussian filter to a time series (in this case the time series for Applied Materials (symbol: AMAT). Two files will be generated: filtered_data which contains the close price time series that has been filtered using Haar wavelets and filtered_data_noise which contains the noise spectrum. */ class filter_test { public static void main( String[] args ) { String timeSeriesFile = "amat_close"; // Applied Materials Close prices tsRead data = new tsRead( timeSeriesFile ); // // The wavelet algorithms work on arrays whose length is a power // of two. Set the length to the nearest power of two that is // less than or equal to the data length. // int len = data.getSize(); if (len > 0) { int newSize = binary.nearestPower2( len ); data.setSize( newSize ); double[] vals = data.getArray(); if (vals != null) { noise_filter filter = new noise_filter(); filter.filter_time_series( "filtered_data", vals ); } } } } // filter_test wavelets/histoTest.java100644 1040 1001 3555 7316771735 13764 0ustar everyone import wavelets.*; import wavelet_util.*; import experimental.*; /**

Test the histogram generation code.

The data that is histogrammed is shown below in sorted order (to make it easier to check the histogram result). The values are ordered row major. E.g.

0 1 2 3
4 5 6 7
8 9 ...
-1.0,     -0.9375,  -0.9375,  -0.78125
-0.75,    -0.71875, -0.71875, -0.6875
-0.625,   -0.625,   -0.5625,  -0.5
-0.46875, -0.46875, -0.4375,  -0.4375
-0.4375,  -0.4375,  -0.4375,  -0.40625
-0.375,   -0.28125, -0.28125, -0.25
-0.25,    -0.1875,  -0.15625, -0.15625
-0.0625,  -0.03125,  0.0,      0.0
0.0,       0.03125,  0.03125,  0.09375
0.09375,   0.09375,  0.09375,  0.21875
0.21875,   0.25,     0.25,     0.25
0.28125,   0.3125,   0.4375,   0.46875
0.5,       0.53125,  0.53125,  0.5625
0.59375,   0.625,    0.71875,  0.75
0.765625,  0.78125,  0.8125,   0.84375
0.9375,    0.9375,   1.03125,  1.15625
*/ class histoTest { public static void main( String[] args ) { double vals[] = { -0.9375, 0.9375, -0.25, -0.4375, -0.4375, 0.9375, 0.09375, -0.03125, 0.25, 0.09375,-0.75, -0.1875, -0.28125, 0.5, -0.9375, -1.0, -0.15625,-0.625, -0.5, 0.5625, -0.4375, -0.46875,-0.6875, 0.53125, 0.03125, 1.15625, 0.765625, 0.09375, -0.46875, 0.75, -0.375, -0.15625, -0.5625, 0.4375, 0.0, -0.625, 0.46875, 0.0, 0.25, -0.4375, 0.21875, -0.71875, 0.625, 0.53125, -0.0625, 0.03125,-0.28125, 0.09375, -0.25, -0.78125, 0.78125, 0.3125, 0.84375, 0.21875,-0.71875, 1.03125, 0.59375, -0.4375, 0.0, 0.25, 0.71875, 0.8125, 0.28125, -0.40625 }; histo graph = new histo(); graph.histogram( vals, "foofile" ); } // main } wavelets/inplace_haar_test.java100644 1040 1001 4355 7310527540 15425 0ustar everyone /** Test code for the inplace_haar.java wavelets code. To compile make sure that ".." is in the CLASSPATH. For example, on UNIX (or with the Cygnus Bash shell) setenv CLASSPATH ..:$CLASSPATH Compile commmand: javac simple_haar_test.java jvc simple_haar_test.java To run enter: jview simple_haar_test java simple_haar_test */ // package test; import wavelets.*; import wavelet_util.*; class inplace_haar_test { /** Print an array of doubles */ private void pr_vals( double[] vals ) { if (vals != null) { for (int i = 0; i < vals.length; i++) { System.out.print( vals[i] ); if (i < vals.length-1) System.out.print(", "); } System.out.println(); } } // pr_vals /** Test the simple_haar wavelet code, using the vals argument. */ private void wavelet_test( double[] vals ) { wavelets.inplace_haar haar = new wavelets.inplace_haar(); System.out.println("test data: "); pr_vals( vals ); System.out.println(); haar.wavelet_calc( vals ); System.out.println("wavelet coefficients, ordered by " + "increasing frequency:"); haar.pr_ordered(); System.out.println(); System.out.println("after calculating inverse Haar transform:"); haar.inverse(); haar.pr(); System.out.println(); } // wavelet_test public static void main( String[] args ) { inplace_haar_test mainRef = new inplace_haar_test(); if (mainRef != null) { double vals[] = { (double)3.0, (double)1.0, (double)0.0, (double)4.0, (double)8.0, (double)6.0, (double)9.0, (double)9.0 }; mainRef.wavelet_test(vals); System.out.println(); double vals2[] = { 32.0, 10.0, 20.0, 38.0, 37.0, 28.0, 38.0, 34.0, 18.0, 24.0, 18.0, 9.0, 23.0, 24.0, 28.0, 34.0 }; mainRef.wavelet_test(vals2); // Calculate the wavelet spectrum wavelets.inplace_haar haar = new wavelets.inplace_haar(); haar.wavelet_spectrum( vals2 ); // output spectrum to "foofile" gnuplot3D pts = new gnuplot3D( vals2, "foofile" ); } } // main } // inplace_haar_test wavelets/README_JAVADOC100644 1040 1001 626 7321653745 13112 0ustar everyone To generate the javadoc documentation for the test files and packages in this directory: 1. Create the local directory "doc" 2. Use the javadoc command below javadoc -private -d doc -classpath ".;e:\jdk1.2.2\jre\lib\rt.jar" *.java dataInput double_vec experimental sort wavelet_util wavelets Obviously this is for Windows. Change the directory paths as needed for UNIX or Linux. wavelets/readTest.java100644 1040 1001 660 7321424242 13503 0ustar everyone import dataInput.*; /** Test the time series read code. 
*/ class readTest { public static void main( String[] args ) { tsRead ts = new tsRead("amat_close"); double[] vals = ts.getArray(); if (vals != null) { for (int i = 0; i < vals.length; i++) { System.out.println(i + " " + vals[i] ); } } else System.out.println("readTest: time series is null"); } // main } wavelets/simple_haar_test.java100644 1040 1001 3646 7310526231 15301 0ustar everyone /** Test code for the simple_haar.java wavelets code. To compile make sure that ".." is in the CLASSPATH. For example, on UNIX (or with the Cygnus Bash shell) setenv CLASSPATH ..:$CLASSPATH Compile commmand: javac simple_haar_test.java jvc simple_haar_test.java To run enter: jview simple_haar_test java simple_haar_test */ import wavelets.*; class simple_haar_test { /** Print an array of doubles */ private void pr_vals( double[] vals ) { if (vals != null) { for (int i = 0; i < vals.length; i++) { System.out.print( vals[i] ); if (i < vals.length-1) System.out.print(", "); } System.out.println(); } } // pr_vals /** Test the simple_haar wavelet code, using the vals argument. */ private void wavelet_test( double[] vals ) { wavelets.simple_haar haar = new wavelets.simple_haar(); System.out.println("test data: "); pr_vals( vals ); System.out.println(); haar.wavelet_calc( vals ); System.out.println("wavelet coefficients, ordered by " + "increasing frequency:"); haar.pr(); System.out.println(); System.out.println("after calculating inverse Haar transform:"); haar.inverse(); haar.pr_values(); System.out.println(); } // wavelet_test public static void main( String[] args ) { simple_haar_test mainRef = new simple_haar_test(); if (mainRef != null) { double vals[] = { (double)3.0, (double)1.0, (double)0.0, (double)4.0, (double)8.0, (double)6.0, (double)9.0, (double)9.0 }; mainRef.wavelet_test( vals ); double vals2[] = { 32.0, 10.0, 20.0, 38.0, 37.0, 28.0, 38.0, 34.0, 18.0, 24.0, 18.0, 9.0, 23.0, 24.0, 28.0, 34.0 }; mainRef.wavelet_test( vals2 ); } } // main } // simple_haar_test wavelets/sort/ 40755 1040 1001 0 7321430300 11765 5ustar everyonewavelets/sort/generic_sort.java100644 1040 1001 7630 7321427461 15434 0ustar everyone package sort; /**

A generic sort class for arrays whose elements are derived from Object. Sadly, this does not include primitive types like int and double.

This is an abstract class. The class that extends this class must define the compare function. The compare function returns an integer value that is less than, equal to, or greater than zero depending on the result of the comparison (the function is modeled after UNIX strcmp).

This class is based on the Java quicksort algorithm by James Gosling at Sun Microsystems (see below).

@(#)QSortAlgorithm.java 1.3 29 Feb 1996 James Gosling

Copyright (c) 1994-1996 Sun Microsystems, Inc. All Rights Reserved.

Permission to use, copy, modify, and distribute this software and its documentation for NON-COMMERCIAL or COMMERCIAL purposes and without fee is hereby granted.

Please refer to the file http://www.javasoft.com/copy_trademarks.html for further important copyright and trademark information and to http://www.javasoft.com/licensing.html for further important licensing information for the Java (tm) Technology.

*/ public abstract class generic_sort { /** Exchange element a[i] and a[j] */ private void swap(Object a[], int i, int j) { Object t; t = a[i]; a[i] = a[j]; a[j] = t; } // swap /** This is a generic version of C.A.R Hoare's Quick Sort * algorithm. This will handle arrays that are already * sorted, and arrays with duplicate keys.
* * If you think of a one dimensional array as going from * the lowest index on the left to the highest index on the right * then the parameters to this function are lowest index or * left and highest index or right. The first time you call * this function it will be with the parameters 0, a.length - 1. * * @param a an integer array * @param lo0 left boundary of array partition * @param hi0 right boundary of array partition */ private void QuickSort(Object a[], int lo0, int hi0) { int lo = lo0; int hi = hi0; Object mid; if ( hi0 > lo0) { /* Arbitrarily establishing partition element as the midpoint of * the array. */ mid = a[ ( lo0 + hi0 ) / 2 ]; // loop through the array until indices cross while( lo <= hi ) { /* find the first element that is greater than or equal to * the partition element starting from the left Index. */ while( ( lo < hi0 ) && ( compare(a[lo], mid) < 0 ) ) ++lo; /* find an element that is smaller than or equal to * the partition element starting from the right Index. */ while( ( hi > lo0 ) && ( compare(a[hi], mid) > 0) ) --hi; // if the indexes have not crossed, swap if( lo <= hi ) { swap(a, lo, hi); ++lo; --hi; } } // while /* If the right index has not reached the left side of array * must now sort the left partition. */ if( lo0 < hi ) QuickSort( a, lo0, hi ); /* If the left index has not reached the right side of array * must now sort the right partition. */ if( lo < hi0 ) QuickSort( a, lo, hi0 ); } } // QuickSort public void sort(Object a[]) { QuickSort(a, 0, a.length - 1); } // sort /**

This is an abstract function that should be defined by the class derived from generic_sort. This function is passed two objects, a and b. It compares them and should return the following values:

    If (a == b) return 0
    if (a < b) return -1
    if (a > b) return 1
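For example, a hypothetical subclass that sorts arrays of String objects (see sortTest.java in this directory for subclasses that sort a user-defined type):

    class string_sort extends generic_sort {
        protected int compare( Object a, Object b ) {
            // String.compareTo returns a negative, zero or positive value,
            // matching the strcmp-style contract described above.
            return ((String)a).compareTo( (String)b );
        }
    }

An instance sorts an array in place via the inherited sort method, e.g. new string_sort().sort( names ) for a String array names.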
*/ abstract protected int compare( Object a, Object b); } // generic_sort wavelets/sort/qsort.java100644 1040 1001 5763 7316771554 14137 0ustar everyone package sort; /**

This class supports the Quicksort algorithm. This is a slightly modified version of the Qsort class written by James Gosling at Sun Microsystems (see below).

@(#)QSortAlgorithm.java 1.3 29 Feb 1996 James Gosling

Copyright (c) 1994-1996 Sun Microsystems, Inc. All Rights Reserved.

Permission to use, copy, modify, and distribute this software and its documentation for NON-COMMERCIAL or COMMERCIAL purposes and without fee is hereby granted.

Please refer to the file http://www.javasoft.com/copy_trademarks.html for further important copyright and trademark information and to http://www.javasoft.com/licensing.html for further important licensing information for the Java (tm) Technology.

*/ public class qsort { /** This is a generic version of C.A.R Hoare's Quick Sort * algorithm. This will handle arrays that are already * sorted, and arrays with duplicate keys.
* * If you think of a one dimensional array as going from * the lowest index on the left to the highest index on the right * then the parameters to this function are lowest index or * left and highest index or right. The first time you call * this function it will be with the parameters 0, a.length - 1. * * @param a an integer array * @param lo0 left boundary of array partition * @param hi0 right boundary of array partition */ static void QuickSort(double a[], int lo0, int hi0) { int lo = lo0; int hi = hi0; double mid; if ( hi0 > lo0) { /* Arbitrarily establishing partition element as the midpoint of * the array. */ mid = a[ ( lo0 + hi0 ) / 2 ]; // loop through the array until indices cross while( lo <= hi ) { /* find the first element that is greater than or equal to * the partition element starting from the left Index. */ while( ( lo < hi0 ) && ( a[lo] < mid ) ) ++lo; /* find an element that is smaller than or equal to * the partition element starting from the right Index. */ while( ( hi > lo0 ) && ( a[hi] > mid ) ) --hi; // if the indexes have not crossed, swap if( lo <= hi ) { swap(a, lo, hi); ++lo; --hi; } } // while /* If the right index has not reached the left side of array * must now sort the left partition. */ if( lo0 < hi ) QuickSort( a, lo0, hi ); /* If the left index has not reached the right side of array * must now sort the right partition. */ if( lo < hi0 ) QuickSort( a, lo, hi0 ); } } // QuickSort private static void swap(double a[], int i, int j) { double T; T = a[i]; a[i] = a[j]; a[j] = T; } // swap public static void sort(double a[]) { QuickSort(a, 0, a.length - 1); } // sort } // qsort wavelets/sortTest.java100644 1040 1001 6207 7321423165 13605 0ustar everyone import java.util.Random; import sort.*; /**

Test for generic sort

Classes to sort a specific type are derived from the abstract generic_sort class. This code tests the sort code by creating two specific sort classes: one that sorts arrays of testElem objects by index and one that sorts by the val field.

*/ class sortTest { /**

Test data structure: index is the array index and val is the data element.

An array of testElem objects can be sorted by value and then rearranged back into the original order by sorting by index.

*/ private class testElem { testElem( int i ) { index = i; } public int index; public double val; } /** Sort by index */ private class sort_testElem_index extends generic_sort { /** if (a.index == b.index) return 0 if (a.index < b.index) return -1 if (a.index > b.index) return 1; */ protected int compare( Object a, Object b ) { int rslt = 0; testElem t_a = (testElem)a; testElem t_b = (testElem)b; if (t_a.index < t_b.index) rslt = -1; else if (t_a.index > t_b.index) rslt = 1; return rslt; } // compare } // sort_testElem_index /** Sort by value */ private class sort_testElem_val extends generic_sort { /** if (a.val == b.val) return 0 if (a.val < b.val) return -1 if (a.val > b.val) return 1; */ protected int compare( Object a, Object b ) { int rslt = 0; testElem t_a = (testElem)a; testElem t_b = (testElem)b; if (t_a.val < t_b.val) rslt = -1; else if (t_a.val > t_b.val) rslt = 1; return rslt; } // compare } // sort_testElem_val public sort_testElem_val alloc_sort_testElem_val() { return new sort_testElem_val(); } public sort_testElem_index alloc_sort_testElem_index() { return new sort_testElem_index(); } public testElem[] alloc_array( int size ) { testElem a[] = new testElem[ size ]; for (int i = 0; i < size; i++) { a[i] = new testElem( i ); } return a; } void printArray( testElem a[] ) { for (int i = 0; i < a.length; i++) { System.out.println(i + " " + a[i].index + " " + a[i].val ); } } public static void main( String[] args ) { sortTest s = new sortTest(); final int size = 20; testElem a[] = s.alloc_array( size ); Random generator = new Random(); for (int i = 0; i < a.length; i++) { a[i].val = generator.nextDouble(); } sort_testElem_index sortByIndex = s.alloc_sort_testElem_index(); sort_testElem_val sortByVal = s.alloc_sort_testElem_val(); System.out.println("before sort by val"); s.printArray( a ); sortByVal.sort( a ); System.out.println("after sort by val"); s.printArray( a ); sortByIndex.sort( a ); System.out.println("after sort by index"); s.printArray( a ); } } // sortTest wavelets/spectrum_test.java100644 1040 1001 2056 7321424367 14662 0ustar everyone import java.io.*; import wavelets.*; import wavelet_util.*; import dataInput.*; /** Generate gnuplot files for the time series with various spectrum removed. */ class spectrum_test { public static void main( String[] args ) { String timeSeriesFile = "amat_close"; // Applied Materials Close prices tsRead data = new tsRead( timeSeriesFile ); // // The wavelet algorithms work on arrays whose length is a power // of two. Set the length to the nearest power of two that is // less than or equal to the data length. // int len = data.getSize(); if (len > 0) { int newSize = binary.nearestPower2( len ); data.setSize( newSize ); double[] vals = data.getArray(); if (vals != null) { wavelets.inplace_haar haar = new wavelets.inplace_haar(); haar.wavelet_calc( vals ); haar.order(); coef_spectrum spectrum = new coef_spectrum(); spectrum.filter_one_spectrum( vals ); spectrum.only_one_spectrum( vals ); } } } } // spectrum_test wavelets/statTest.java100644 1040 1001 3114 7321423552 13563 0ustar everyone import dataInput.*; import wavelets.*; import wavelet_util.binary; import experimental.*; /** Test the experimental code to generate a normal curve with the mean and standard deviation derived from a coefficient spectrum (in this case the highest frequency spectrum). 
*/ class statTest { public static void print_curve( statistics.bell_info info, statistics.point curve[] ) { System.out.println("#"); System.out.println("# mean = " + info.mean ); System.out.println("# stddev = " + info.sigma ); System.out.println("#"); for (int i = 0; i < curve.length; i++) { System.out.println( curve[i].x + " " + curve[i].y ); } System.out.println(); } public static void main( String[] args ) { tsRead ts = new tsRead("amat_close"); int len = ts.getSize(); if (len > 0) { len = binary.nearestPower2(len); ts.setSize( len ); double vals[] = ts.getArray(); wavelets.inplace_haar haar = new inplace_haar(); haar.wavelet_calc( vals ); haar.order(); int end = vals.length; int start = end >> 1; double coef[] = new double[ start ]; int ix = 0; for (int i = start; i < end; i++) { coef[ix] = vals[i]; ix++; } statistics.bell_info info = statistics.stddev( coef ); if (info != null) { statistics.point curve[] = statistics.normal_curve(info, start); // print_curve( info, curve ); statistics stat = new statistics(); stat.integrate_curve( curve ); } } } // main } // statTest wavelets/timeseries_histo.java100644 1040 1001 6107 7314667101 15336 0ustar everyone import wavelets.*; import wavelet_util.*; import experimental.*; import dataInput.*; /**

Generate histograms for the Haar coefficients created by applying the Haar transform to the time series for the Applied Materials (symbol: AMAT) daily close price.

There are 512 data points in the AMAT daily close price time series. This program generates a histogram for the first four high frequency sets of coefficients (i.e., 256, 128, 64, and 32 coefficients).

Financial theory states that the average daily return (i.e., the difference between today's close price and yesterday's close price) is normally distributed. So the histogram of the highest frequency coefficients, which reflect the difference between two close prices, should be bell-curve shaped and centered around zero.

The close price in the AMAT time series rises sharply about halfway through. So as the coefficient frequency decreases, the histogram will be shifted farther and farther away from zero.

Note that an inplace Haar transform is used that replaces the values with the coefficients. The order function orders the coefficients from the butterfly pattern generated by the inplace algorithm into increasing frequencies, where the lowest frequency is at the beginning of the array. Each frequency is a power of two: 2, 4, 8, 16, 32, 64, 128, 256.

*/ class timeseries_histo { /**

Graph the coefficients.

*/ private void graph_coef( double[] vals ) { histo graph = new histo(); String file_name_base = "coef_histo"; histo.mean_median m; int start; int end = vals.length; do { start = end >> 1; double coef[] = new double[ start ]; int ix = 0; double sum = 0; for (int i = start; i < end; i++, ix++) { coef[ix] = vals[i]; sum = sum + vals[i]; } System.out.println("graph_coef: sum = " + sum ); String file_name = file_name_base + start; m = graph.histogram( coef, file_name ); if (m != null) { System.out.println("coef " + start + " mean = " + m.mean + " median = " + m.median ); } end = start; } while (start > 32); // while } // graph_coef public static void main( String[] args ) { String timeSeriesFile = "amat_close"; // Applied Materials Close prices tsRead data = new tsRead( timeSeriesFile ); // // The wavelet algorithms work on arrays whose length is a power // of two. Set the length to the nearest power of two that is // less than or equal to the data length. // int len = data.getSize(); if (len > 0) { int newSize = binary.nearestPower2( len ); data.setSize( newSize ); double[] vals = data.getArray(); if (vals != null) { gnuplot3D pts; wavelets.inplace_haar haar = new wavelets.inplace_haar(); haar.wavelet_calc( vals ); haar.order(); timeseries_histo tsHisto = new timeseries_histo(); tsHisto.graph_coef( vals ); } } } // main } wavelets/timeseries_test.java100644 1040 1001 2367 7310532440 15164 0ustar everyone import wavelets.*; import wavelet_util.*; import dataInput.*; /**

Test the Inplace Haar wavelet algorithm with a financial time series, in this case, the daily close price for Applied Materials (symbol: AMAT).

The code below reads the time series and outputs the coefficients and the wavelet spectrum for graphing. */ class timeseries_test { public static void main( String[] args ) { String timeSeriesFile = "amat_close"; // Applied Materials Close prices tsRead data = new tsRead( timeSeriesFile ); // // The wavelet algorithms work on arrays whose length is a power // of two. Set the length to the nearest power of two that is // less than or equal to the data length. // int len = data.getSize(); if (len > 0) { int newSize = binary.nearestPower2( len ); data.setSize( newSize ); double[] vals = data.getArray(); if (vals != null) { gnuplot3D pts; wavelets.inplace_haar haar = new wavelets.inplace_haar(); haar.wavelet_calc( vals ); haar.order(); pts = new gnuplot3D(vals, "coef" ); haar.inverse(); haar.wavelet_spectrum( vals ); pts = new gnuplot3D(vals, "spectrum"); } } } // main } wavelets/wavelets/ 40755 1040 1001 0 7321417414 12643 5ustar everyonewavelets/wavelets/inplace_haar.java100644 1040 1001 60563 7317433042 16243 0ustar everyone package wavelets; import wavelet_util.*; /** *

Copyright and Use

You may use this source code without limitation and without fee as long as you include:

This software was written and is copyrighted by Ian Kaplan, Bear Products International, www.bearcave.com, 2001.

This software is provided "as is", without any warranty or claim as to its usefulness. Anyone who uses this source code uses it at their own risk. Nor is any support provided by Ian Kaplan and Bear Products International.

Please send any bug fixes or suggested source changes to:

     iank@bearcave.com

To generate the documentation for the wavelets package using Sun Microsystems' javadoc, use the command

        javadoc -private wavelets

The inplace_haar class calculates an in-place Haar wavelet transform. By in-place it is meant that the result occupies the same array as the data set on which the Haar transform is calculated.

The Haar wavelet calculation is awkward when the number of data values is not a power of two. This is especially true for the in-place Haar. So here we only support data sets whose length is a power of two.

The sad truth about computation is that the time-space tradeoff is an iron rule. The Haar in-place wavelet transform is more memory efficient, but it also takes more computation.

The algorithm used here is from Wavelets Made Easy by Yves Nievergelt, section 1.4. The in-place transform replaces the data values with Haar values and coefficients. This algorithm uses a butterfly pattern, where the indices are calculated by the following:

   for (l = 0; l < log2( size ); l++) {
     for (j = 0; j < size; j++) {
        aj = 2^l * (2 * j);
        cj = 2^l * ((2 * j) + 1);
        if (cj >= size)
	  break;
     } // for j
   } // for l

If there are 16 data elements (indexed 0..15), these loops will generate the butterfly index pattern shown below, where the first element in a pair is aj, the Haar value and the second element is cj, the Haar coefficient.

{0, 1} {2, 3} {4, 5} {6, 7} {8, 9} {10, 11} {12, 13} {14, 15}
{0, 2} {4, 6} {8, 10} {12, 14}
{0, 4} {8, 12}
{0, 8}

Each of these index sets represents a Haar wavelet frequency (here they are listed from the highest frequency to the lowest). @author Ian Kaplan */ public class inplace_haar extends wavelet_base { /** result of calculating the Haar wavelet */ private double[] wavefx; /** initially false: true means wavefx is ordered by frequency */ boolean isOrdered = false; /** Set the wavefx reference variable to the data vector. Also, initialize the isOrdered flag to false. This indicates that the Haar coefficients have not been calculated and ordered by frequency. */ public void setWavefx( double[] vector ) { if (vector != null) { wavefx = vector; isOrdered = false; } } public void setIsOrdered() { isOrdered = true; } /** *

Calculate the in-place Haar wavelet function. The data values are overwritten by the coefficient result, which is pointed to by a local reference (wavefx).

The in-place Haar transform leaves the coefficients in a butterfly pattern. The Haar transform calculates a Haar value (a) and a coefficient (c) from the formula shown below.

  a[i] = (v[i] + v[i+1])/2
  c[i] = (v[i] - v[i+1])/2

In the in-place Haar transform the values for a and c replace the values for v[i] and v[i+1]. Subsequent passes calculate new a and c values from the previous a[i] and a[i+1] values. This produces the butterfly pattern outlined below.


v0 v1 v2 v3 v4 v5 v6 v7

a0 c0 a0 c0 a0 c0 a0 c0

a1 c0 c1 c0 a1 c0 c1 c0

a2 c0 c1 c0 c2 c0 c1 c0

For example, the Haar wavelet calculation with the data set {3, 1, 0, 4, 8, 6, 9, 9} is shown below. In each row the a values (the running averages) are the ones used in the next sweep of the calculation.

3  1  0  4  8  6  9  9

2  1  2 -2  7  1  9  0

2  1  0 -2  8  1 -1  0

5  1  0 -2 -3  1 -1  0
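
As a minimal usage sketch (not part of the original class), the calculation above can be reproduced with:

   double[] vals = {3, 1, 0, 4, 8, 6, 9, 9};
   inplace_haar haar = new inplace_haar();
   haar.wavelet_calc( vals );
   haar.pr();   // prints {5.0, 1.0, 0.0, -2.0, -3.0, 1.0, -1.0, 0.0}
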
@param values An array of double data values from which the Haar wavelet function will be calculated. The number of values must be a power of two. */ public void wavelet_calc( double[] values ) { int len = values.length; setWavefx( values ); if (len > 0) { byte log = binary.log2( len ); len = binary.power2( log ); // calculation must be on 2 ** n values for (byte l = 0; l < log; l++) { int p = binary.power2( l ); for (int j = 0; j < len; j++) { int a_ix = p * (2 * j); int c_ix = p * ((2 * j) + 1); if (c_ix < len) { double a = (values[a_ix] + values[c_ix]) / 2; double c = (values[a_ix] - values[c_ix]) / 2; values[a_ix] = a; values[c_ix] = c; } else { break; } } // for j } // for l } } // wavelet_calc /** * Recursively calculate the Haar spectrum, replacing data in the original array with the calculated averages. */ private void spectrum_calc(double[] values, int start, int end ) { int j; int newEnd; if (start > 0) { j = start-1; newEnd = end >> 1; } else { j = end-1; newEnd = end; } for (int i = end-1; i > start; i = i - 2, j--) { values[j] = (values[i-1] + values[i]) / 2; } // for if (newEnd > 1) { int newStart = newEnd >> 1; spectrum_calc(values, newStart, newEnd); } } // spectrum_calc /** *

Calculate the Haar wavelet spectrum

Wavelet calculations sample a signal via a window. In the case of the Haar wavelet, this window is a rectangle. The signal is sampled in passes, using a window of a wider width for each pass. Each sampling can be thought of as generating a spectrum of the original signal at a particular resolution.

In the case of the Haar wavelet, the window is initially two values wide. The first spectrum has half as many values as the original signal, where each new value is the average of two values from the original signal.

The next spectrum is generated by increasing the window width by a factor of two and averaging four elements. The third spectrum is generated by increasing the window size to eight, etc... Note that each of these averages can be formed from the previous average.

For example, if the initial data set is

    { 32.0, 10.0, 20.0, 38.0, 37.0, 28.0, 38.0, 34.0,
      18.0, 24.0, 18.0,  9.0, 23.0, 24.0, 28.0, 34.0 }

The first spectrum is constructed by averaging elements {0,1}, {2,3}, {4,5} ...

    {21, 29, 32.5, 36, 21, 13.5, 23.5, 31} 1st spectrum

The second spectrum is constructed by averaging elements {0,1}, {2,3} in the first spectrum:

    {25, 34.25, 17.25, 27.25}           2nd spectrum

    {29.625, 22.25}                     3rd spectrum

    {25.9375}                           4th spectrum

Note that we can calculate the Haar spectrum "in-place", by replacing the original data values with the spectrum values:

    {0, 
     25.9375, 
     29.625, 22.25, 
     25, 34.25, 17.25, 27.25,
     21, 29, 32.5, 36, 21, 13.5, 23.5, 31}

The spectrum is ordered by increasing frequency. This is the same ordering used for the Haar coefficients. Keeping to this ordering allows the same code to be applied to both the Haar spectrum and a set of Haar coefficients.

This function will destroy the original data. When the Haar spectrum is calculated, information is lost. For example, without the Haar coefficients, which provide the difference between the two numbers that form the average, there may be several numbers which satisfy the equation

   a[i] = (v[j] + v[j+1])/2

For 2^n initial elements, there will be 2^n - 1 results. For example:

    512 : initial length
    256 : 1st spectrum
    128 : 2nd spectrum
     64 : 3rd spectrum
     32 : 4th spectrum
     16 : 5th spectrum
      8 : 6th spectrum
      4 : 7th spectrum
      2 : 8th spectrum
      1 : 9th spectrum (overall average)

Since this is an in-place algorithm, the result is returned in the values argument.
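
A minimal usage sketch (not part of the original class), using the 16 value example above:

   double[] vals = { 32.0, 10.0, 20.0, 38.0, 37.0, 28.0, 38.0, 34.0,
                     18.0, 24.0, 18.0,  9.0, 23.0, 24.0, 28.0, 34.0 };
   inplace_haar haar = new inplace_haar();
   haar.wavelet_spectrum( vals );
   // vals now holds, in increasing frequency order:
   // {0, 25.9375, 29.625, 22.25, 25, 34.25, 17.25, 27.25,
   //  21, 29, 32.5, 36, 21, 13.5, 23.5, 31}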

*/ public void wavelet_spectrum( double[] values ) { int len = values.length; if (len > 0) { setWavefx( values ); byte log = binary.log2( len ); len = binary.power2( log ); // calculation must be on 2 ** n values int srcStart = 0; spectrum_calc(values, srcStart, len); values[0] = 0; } } // wavelet_vals /** * Print the result of the Haar wavelet function. */ public void pr() { if (wavefx != null) { int len = wavefx.length; System.out.print("{"); for (int i = 0; i < len; i++) { System.out.print( wavefx[i] ); if (i < len-1) System.out.print(", "); } System.out.println("}"); } } // pr /** *

Print the Haar value and coefficients showing the ordering. The Haar value is printed first, followed by the coefficients in increasing frequency. An example is shown below; the Haar value is the first line and the coefficients follow, one frequency per line.

Data set

    { 32.0, 10.0, 20.0, 38.0,
      37.0, 28.0, 38.0, 34.0,
      18.0, 24.0, 18.0,  9.0, 
      23.0, 24.0, 28.0, 34.0 }

Ordered Haar transform:

   25.9375
   3.6875
   -4.625 -5.0
   -4.0 -1.75 3.75 -3.75
   11.0 -9.0 4.5 2.0 -3.0 4.5 -0.5 -3.0
*/ public void pr_ordered() { if (wavefx != null) { int len = wavefx.length; if (len > 0) { System.out.println(wavefx[0]); int num_in_freq = 1; int cnt = 0; for (int i = 1; i < len; i++) { System.out.print(wavefx[i] + " "); cnt++; if (cnt == num_in_freq) { System.out.println(); cnt = 0; num_in_freq = num_in_freq * 2; } } } } } // pr_ordered /** *

Order the result of the in-place Haar wavelet function, referenced by wavefx. As noted above in the documentation for wavelet_calc(), the in-place Haar transform leaves the coefficients in a butterfly pattern. This can be awkward for some calculations. The order function orders the coefficients by frequency, from the lowest frequency to the highest. The number of coefficients for each frequency band follows powers of two (e.g., 1, 2, 4, 8 ... 2^n). An example of the coefficient sort performed by the order() function is shown below:

before: a2 c0 c1 c0 c2 c0 c1 c0

after:  a2 c2 c1 c1 c0 c0 c0 c0

This results in the same ordering as the ordered Haar wavelet transform.

If the number of elements is 2^n, then the largest number of coefficients will be 2^(n-1). Each of the coefficients in the largest group is separated by one element (which contains other coefficients). This algorithm pushes these together so they are not separated. These coefficients now make up half of the array. The remaining coefficients take up the other half. The next frequency down is also separated by one element. These are pushed together, taking up half of the remaining half. The algorithm keeps going until only one coefficient is left.
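
A minimal usage sketch (not part of the original class), continuing the {3, 1, 0, 4, 8, 6, 9, 9} example from wavelet_calc:

   double[] vals = {3, 1, 0, 4, 8, 6, 9, 9};
   inplace_haar haar = new inplace_haar();
   haar.wavelet_calc( vals );   // butterfly order: {5, 1, 0, -2, -3, 1, -1, 0}
   haar.order();
   haar.pr();                   // prints {5.0, -3.0, 0.0, -1.0, 1.0, -2.0, 1.0, 0.0}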

As with wavelet_calc above, this algorithm assumes that the number of values is a power of two. */ public void order() { if (wavefx != null) { int half = 0; for (int len = wavefx.length; len > 1; len = half) { half = len / 2; int skip = 1; for (int dest = len - 2; dest >= half; dest--) { int src = dest - skip; double tmp = wavefx[src]; for (int i = src; i < dest; i++) wavefx[i] = wavefx[i+1]; wavefx[dest] = tmp; skip++; } // for dest } // for len isOrdered = true; } // if } // order() /** *

Regenerate the data from the Haar wavelet function.

There is no information loss in the Haar function. The original data can be regenerated by reversing the calculation. Given a Haar value a and a coefficient c, two Haar values can be generated:

        a[i]   = a + c;
        a[i+1] = a - c;

The transform is calculated from the low frequency coefficients to the high frequency coefficients. An example is shown below for the result of the ordered Haar transform. In the notation below, the a terms are Haar values and the c terms are coefficients.

To regenerate {a1, a2, a3, a4, a5, a6, a7, a8} from

a1
c1
c2 c3
c4 c5 c6 c7

The inverse Haar transform is applied:

a1 = a1 + c1
a2 = a1 - c1

a1 = a1 + c2
a2 = a1 - c2
a3 = a2 + c3
a4 = a2 - c3

a1 = a1 + c4
a2 = a1 - c4
a3 = a2 + c5
a4 = a2 - c5
a5 = a3 + c6
a6 = a3 - c6
a7 = a4 + c7
a8 = a4 - c7

For example:

5.0
-3.0
0.0 -1.0
1.0 -2.0 1.0 0.0

5.0+(-3), 5.0-(-3) = {2 8}

2+0, 2-0, 8+(-1), 8-(-1) = {2, 2, 7, 9}

2+1, 2-1, 2+(-2), 2-(-2), 7+1, 7-1, 9+0, 9-0 = {3,1,0,4,8,6,9,9}

By using the butterfly indices the inverse transform can also be applied to an unordered in-place Haar function.
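
A minimal round-trip usage sketch (not part of the original class):

   double[] vals = {3, 1, 0, 4, 8, 6, 9, 9};
   inplace_haar haar = new inplace_haar();
   haar.wavelet_calc( vals );
   haar.order();
   haar.inverse();   // the ordered inverse is applied
   haar.pr();        // prints {3.0, 1.0, 0.0, 4.0, 8.0, 6.0, 9.0, 9.0}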

This function checks to see whether the wavefx array is ordered. If wavefx is ordered, the inverse transform described above is applied. If the data is still in the in-place butterfly configuration, an inverse butterfly is applied. Note that once the inverse Haar is calculated the Haar function data will be replaced by the original data. */ public void inverse() { if (wavefx != null) { if (isOrdered) { inverse_ordered(); // Since the signal has been rebuilt from the // ordered coefficients, set isOrdered to false isOrdered = false; } else { inverse_butterfly(); } } } // inverse /** *

Calculate the inverse Haar transform on an ordered set of coefficients.

See comment above for the inverse() method for details.

The algorithm above uses an implicit temporary. The in-place algorithm is a bit more complex. As noted above, the Haar value and coefficient are replaced with the newly calculated values:

     t1 = a1 + c1;
     t2 = a1 - c1;
     a1 = t1;
     c1 = t2

As the calculation proceeds and the coefficients are replaced by the newly calculated Haar values, the values are out of order. This is shown in the example below (use javadoc -private wavelets to see this comment). Here each rebuilt value is followed, in brackets, by the index where it belongs in the final ordering. A sort operation reorders these values as the calculation proceeds. The variable L is the power of two.

 start: {5.0, -3.0, 0.0, -1.0, 1.0, -2.0, 1.0, 0.0}
[0, 1]
L = 1
{2.0[0], 8.0[1], 0.0, -1.0, 1.0, -2.0, 1.0, 0.0}
 sort:

 start: {2.0[0], 8.0[1], 0.0, -1.0, 1.0, -2.0, 1.0, 0.0}
[0, 2], [1, 3]
L = 2
{2.0[0], 7.0[2], 2.0[1], 9.0[3], 1.0, -2.0, 1.0, 0.0}
 sort:
exchange [1, 2]
{2.0[0], 2.0[1], 7.0[2], 9.0[3], 1.0, -2.0, 1.0, 0.0}

 start: {2.0, 2.0, 7.0, 9.0, 1.0, -2.0, 1.0, 0.0}
[0, 4], [1, 5], [2, 6], [3, 7]
L = 4
{3.0[0], 0.0[2], 8.0[4], 9.0[6], 1.0[1], 4.0[3], 6.0[5], 9.0[7]}
 sort:
exchange [1, 4]
{3.0[0], 1.0[1], 8.0[4], 9.0[6], 0.0[2], 4.0[3], 6.0[5], 9.0[7]}
exchange [2, 5]
{3.0[0], 1.0[1], 4.0[3], 9.0[6], 0.0[2], 8.0[4], 6.0[5], 9.0[7]}
exchange [3, 6]
{3.0[0], 1.0[1], 4.0[3], 6.0[5], 0.0[2], 8.0[4], 9.0[6], 9.0[7]}
exchange [2, 4]
{3.0[0], 1.0[1], 0.0[2], 6.0[5], 4.0[3], 8.0[4], 9.0[6], 9.0[7]}
exchange [3, 5]
{3.0[0], 1.0[1], 0.0[2], 8.0[4], 4.0[3], 6.0[5], 9.0[6], 9.0[7]}
exchange [3, 4]
{3.0[0], 1.0[1], 0.0[2], 4.0[3], 8.0[4], 6.0[5], 9.0[6], 9.0[7]}
********/ private void inverse_ordered() { int len = wavefx.length; for (int L = 1; L < len; L = L * 2) { int i; // calculate for (i = 0; i < L; i++) { int a_ix = i; int c_ix = i + L; double a1 = wavefx[a_ix] + wavefx[c_ix]; double a1_plus_1 = wavefx[a_ix] - wavefx[c_ix]; wavefx[a_ix] = a1; wavefx[c_ix] =a1_plus_1; } // for i // sort int offset = L-1; for (i = 1; i < L; i++) { for (int j = i; j < L; j++) { double tmp = wavefx[j]; wavefx[j] = wavefx[j+offset]; wavefx[j+offset] = tmp; } // for j offset = offset - 1; } // for i } // for L } // inverse_ordered /** *

Calculate the inverse Haar transform on the result of the in-place Haar transform.

The inverse butterfly exactly reverses the in-place Haar transform. Instead of generating coefficient values (c[i]), the inverse butterfly calculates Haar values (a[i]) using the equations:

        new_a[i]   = a[i] + c[i]
        new_a[i+1] = a[i] - c[i]
a0 c0 c1 c0 c2 c0 c1 c0

a1 = a0 + c2
a1 = a0 - c2

a1 c0 c1 c0 a1 c0 c1 c0

a2 = a1 + c1
a2 = a1 - c1
a2 = a1 + c1
a2 = a1 - c1

a2 c0 a2 c0 a2 c0 a2 c0

a3 = a2 + c0
a3 = a2 - c0
a3 = a2 + c0
a3 = a2 - c0
a3 = a2 + c0
a3 = a2 - c0
a3 = a2 + c0
a3 = a2 - c0

a3 a3 a3 a3 a3 a3 a3 a3 

A numeric example is shown below.

a0  c0  c1  c0  c2  c0  c1  c0
 5   1   0  -2  -3   1  -1   0

a1 = 5 + (-3) = 2
a1 = 5 - (-3) = 8

a1  c0  c1  c0  a1  c0  c1  c0
 2   1   0  -2   8   1  -1   0

a2 = 2 + 0    = 2
a2 = 2 - 0    = 2
a2 = 8 + (-1) = 7
a2 = 8 - (-1) = 9

a2  c0  a2  c0  a2  c0  a2  c0
 2   1   2  -2   7   1   9   0

a3 = 2 + 1    = 3
a3 = 2 - 1    = 1
a3 = 2 + (-2) = 0
a3 = 2 - (-2) = 4
a3 = 7 + 1    = 8
a3 = 7 - 1    = 6
a3 = 9 + 0    = 9
a3 = 9 - 0    = 9

a3  a3  a3  a3  a3  a3  a3  a3
 3   1   0   4   8   6   9   9

Note that the inverse_butterfly function is faster than the inverse_ordered function, since data does not have to be reshuffled during the calculation. */ private void inverse_butterfly() { int len = wavefx.length; if (len > 0) { byte log = binary.log2( len ); len = binary.power2( log ); // calculation must be on 2 ** n values for (byte l = (byte)(log-1); l >= 0; l--) { int p = binary.power2( l ); for (int j = 0; j < len; j++) { int a_ix = p * (2 * j); int c_ix = p * ((2 * j) + 1); if (c_ix < len) { double a = wavefx[a_ix] + wavefx[c_ix]; double c = wavefx[a_ix] - wavefx[c_ix]; wavefx[a_ix] = a; wavefx[c_ix] = c; } else { break; } } // for j } // for l } } // inverse_butterfly } // inplace_haar wavelets/wavelets/simple_haar.java100644 1040 1001 17767 7310522635 16131 0ustar everyone package wavelets; import java.util.*; import wavelet_util.*; /** *

Class simple_haar

This object calculates the "ordered fast Haar wavelet transform". The algorithm used is a simple Haar wavelet algorithm that does not calculate the wavelet transform in-place. The function works on Java double values.

The wavelet_calc function is passed an array of doubles from which it calculates the Haar wavelet transform. The transform is not calculated in place. The result consists of a single value and a Vector of coefficient arrays, ordered by increasing frequency. The number of data points in the data used to calculate the wavelet must be a power of two.

The Haar wavelet transform is based on calculating the Haar step function and the Haar wavelet from two adjacent values. For an array of values S0, S1, S2 .. Sn, the step function and wavelet are calculated as follows for two adjacent points, S0 and S1:

      HaarStep = (S0 + S1)/2  // average of S0 and S1
      HaarWave = (S0 - S1)/2  // average difference of S0 and S1

This yields two vectors: a, which contains the HaarStep values and c, which contains the HaarWave values.

The result of the wavelet_calc is the single Haar value and a set of coefficient arrays. There will be ceil( log2( values.length ) ) coefficient arrays. @author Ian Kaplan @see Wavelets Made Easy by Yves Nievergelt, Birkhauser, 1999
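
A minimal usage sketch (not part of the original class):

   double[] vals = {3, 1, 0, 4, 8, 6, 9, 9};
   simple_haar haar = new simple_haar();
   haar.wavelet_calc( vals );
   haar.pr();
   // prints:
   // {[5.0];
   // -3.0;
   // 0.0, -1.0;
   // 1.0, -2.0, 1.0, 0.0}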

Copyright and Use

You may use this source code without limitation and without fee as long as you include:

This software was written and is copyrighted by Ian Kaplan, Bear Products International, www.bearcave.com, 2001.

This software is provided "as is", without any warranty or claim as to its usefulness. Anyone who uses this source code uses it at their own risk. Nor is any support provided by Ian Kaplan and Bear Products International.

Please send any bug fixes or suggested source changes to:

     iank@bearcave.com
*/ public class simple_haar extends wavelet_base { private double haar_value; // the final Haar step value private Vector coef; // The Haar coefficients private double[] data; /** *

Calculate the Haar wavelet transform (the ordered fast Haar wavelet transform). This calculation is not done in place.

@param values an array of double values on which the Haar transform is applied. */ public void wavelet_calc( double[] values ) { if (values != null) { data = values; coef = new Vector(); haar_value = haar_calc( values ); reverseCoef(); } } // wavelet_calc /** The Haar transform coefficients are generated from the longest coefficient vector (highest frequency) to the shortest (lowest frequency). However, the reverse Haar transform and the display of the values uses the coefficients from the lowest to the highest frequency. This function reverses the coefficient order, so they will be ordered from lowest to highest frequency. */ private void reverseCoef() { int size = coef.size(); Object tmp; for (int i = 0, j = size-1; i < j; i++, j--) { tmp = coef.elementAt(i); coef.setElementAt(coef.elementAt(j), i); coef.setElementAt(tmp, j); } // for } // reverseCoef /** * Recursively calculate the Haar transform. The result of the Haar transform is a single double value and a Vector of coefficients. The coefficients are calculated from the highest to the lowest frequency.

The number of elements in values must be a power of two. */ private double haar_calc( double[] values ) { double retVal; double[] a = new double[ values.length/2 ]; double[] c = new double[ values.length/2 ]; for (int i = 0, j = 0; i < values.length; i += 2, j++) { a[j] = (values[i] + values[i+1])/2; c[j] = (values[i] - values[i+1])/2; } coef.addElement( c ); if (a.length == 1) retVal = a[0]; else retVal = haar_calc( a ); return retVal; } // haar_calc /** *

Calculate the inverse Haar transform from the coefficients and the Haar value.

The inverse function will overwrite the original data that was used to calculate the Haar transform. Since this data is initialized by the caller, the caller should make a copy if the data should not be overwritten.

The coefficients are stored in a Java Vector container. The length of the coefficient arrays is ordered in powers of two (e.g., 1, 2, 4, 8...). The inverse Haar function is calculated using a butterfly pattern to write into the data array. An initial step writes the Haar value into data[0]. In the case of the example below this would be

     data[0] = 5.0;

Then the butterfly pattern shown below is applied. Array indices start at 0, so in this example c[1,1] is the second element of the second coefficient vector.

wavelet:
{[5.0];
-3.0;
0.0, -1.0;
1.0, -2.0, 1.0, 0.0}

tmp = d[0];
d[0] = tmp + c[0, 0]
d[4] = tmp - c[0, 0]

tmp = d[0];
d[0] = tmp + c[1, 0]
d[2] = tmp - c[1, 0]
tmp = d[4];
d[4] = tmp + c[1, 1]
d[6] = tmp - c[1, 1]

tmp = d[0];
d[0] = tmp + c[2, 0]
d[1] = tmp - c[2, 0]
tmp = d[2];
d[2] = tmp + c[2, 1]
d[3] = tmp - c[2, 1]
tmp = d[4];
d[4] = tmp + c[2, 2]
d[5] = tmp - c[2, 2]
tmp = d[6];
d[6] = tmp + c[2, 3]
d[7] = tmp - c[2, 3]
*/ public void inverse() { if (data != null && coef != null && coef.size() > 0) { int len = data.length; data[0] = haar_value; if (len > 0) { // System.out.println("inverse()"); byte log = binary.log2( len ); len = binary.power2( log ); // calculation must be on 2 ** n values int vec_ix = 0; int last_p = 0; byte p_adj = 1; for (byte l = (byte)(log-1); l >= 0; l--) { int p = binary.power2( l ); double c[] = (double[])coef.elementAt( vec_ix ); int coef_ix = 0; for (int j = 0; j < len; j++) { int a_1 = p * (2 * j); int a_2 = p * ((2 * j) + 1); if (a_2 < len) { double tmp = data[a_1]; data[ a_1 ] = tmp + c[coef_ix]; data[ a_2 ] = tmp - c[coef_ix]; coef_ix++; } else { break; } } // for j last_p = p; p_adj++; vec_ix++; } // for l } } } // inverse /** Print the simple Haar object (e.g, the final Haar step value and the coefficients. */ public void pr() { if (coef != null) { System.out.print("{[" + haar_value + "]"); int size = coef.size(); double[] a; for (int i = 0; i < size; i++) { System.out.println(";"); a = (double[])coef.elementAt(i); for (int j = 0; j < a.length; j++) { System.out.print( a[j] ); if (j < a.length-1) { System.out.print(", "); } } // for j } // for i System.out.println("}"); } } // pr /** *

Print the data values.

The pr() method prints the coefficients in increasing frequency. This function prints the data values which were used to generate the Haar transform. */ public void pr_values() { if (data != null) { System.out.print("{"); for (int i = 0; i < data.length; i++) { System.out.print( data[i] ); if (i < data.length-1) System.out.print(", "); } System.out.println("}"); } } // pr_values } // simple_haar wavelets/wavelets/wavelet_base.java100644 1040 1001 1423 7276660504 16255 0ustar everyone package wavelets; import java.util.*; /** *

Wavelet base class

The wavelet base class supplies the common functions power2 and log2 and defines the abstract methods for the derived classes. @author Ian Kaplan @see Wavelets Made Easy by Yves Nieverglt, Birkhauser, 1999 */ abstract class wavelet_base { /** * Abstract function for calculating a wavelet function. @param values Calculate the wavelet function from the values array. */ abstract public void wavelet_calc( double[] values ); /** * Print the wavelet function result. */ abstract public void pr(); abstract public void inverse(); } // wavelet_interface wavelets/wavelet_util/ 40755 1040 1001 0 7321702017 13511 5ustar everyonewavelets/wavelet_util/bell_curves.java100644 1040 1001 30356 7321702053 17005 0ustar everyone package wavelet_util; import java.io.*; import sort.qsort; /**

class bell_curves

Plot the Haar coefficients as a histogram, in gnuplot format. In another file generate a normal curve with the mean and standard deviation of the histogram. Using two files allows the histogram and the normal curve to be plotted together using gnuplot, where the histogram and normal curve will be different colored lines. If the spectrum has 256 coefficient points, the files generated would be coef256 and normal256 for the coefficient histogram and the normal curve. To plot these using gnuplot the following command would be used:

   plot 'coef256' with boxes, 'normal256' with lines

This will result in a gnuplot graph where the histogram is one color and the normal curve is another.

A normal curve is a probability distribution, where the values are plotted on the x-axis and the probability of that value occurring is plotted on the y-axis. To plot the coefficient histogram in the same terms, the percentage of the total points is represented for each histogram bin. This is the same as the integral of the normal curve in the histogram range, if the coefficient histogram fell in a perfect normal distribution. For example, if the coefficients were distributed with mean 0 and standard deviation 1, a bin covering the range 0.0 to 0.5 would hold roughly 19 percent of the points, since the area under the standard normal curve between 0.0 and 0.5 is about 0.19.

*/ public class bell_curves extends plot { String class_name() { return "bell_curves"; } /** A histogram "bin" */ private class bin { public bin() {}; // suppress default initialization public double start; // start of the bin public double percent; // percentage of the total points in bin } /** Encapsulate the low and high values of a number range */ private class low_high { public low_high() {} public low_high( double l, double h ) { low = l; high = h; } public double low; public double high; } /** Bell curve info: mean, sigma (the standard deviation) */ private class bell_info { public bell_info() {} public bell_info(double m, double s) { mean = m; sigma = s; } public double mean; public double sigma; } // bell_info /**

Calculate the mean and standard deviation.

The stddev function is passed an array of numbers. It returns the mean and standard deviation in a bell_info object.

*/ public bell_info stddev( double v[] ) { bell_info stats = null; if (v != null && v.length > 0) { int N = v.length; // calculate the mean (a.k.a average) double sum = 0.0; for (int i = 0; i < N; i++) { sum = sum + v[i]; } double mean = sum / (double)N; // calculate the standard deviation sum double stdDevSum = 0; double x; for (int i = 0; i < N; i++) { x = v[i] - mean; stdDevSum = stdDevSum + (x * x); } double sigmaSquared = stdDevSum / (N-1); double sigma = Math.sqrt( sigmaSquared ); stats = new bell_info(mean, sigma); } return stats; } // stddev /**

normal_interval

Numerically integrate the normal curve with mean info.mean and standard deviation info.sigma over the range low to high.

The normal curve equation that is integrated is:

       f(y) = (1/(s * sqrt(2 * pi))) * e^(-(1/(2 * s^2)) * (y-u)^2)
     

Where u is the mean and s is the standard deviation.

The area under the section of this curve from low to high is returned as the function result.

The normal curve equation results in a curve expressed as a probability distribution, where probabilities are expressed as values greater than zero and less than one. The total area under a normal curve is one.

The integral is calculated in a dumb fashion (e.g., we're not using anything fancy like Simpson's rule). The area in the interval x[i] to x[i+1] is

     area = (x[i+1] - x[i]) * g(x[i])
     

Where the function g(x[i]) is the point on the normal curve probability distribution at x[i].
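
A standalone sketch of this rectangle-rule integration (not from the original source; it simply mirrors the normal_interval logic below), integrating a standard normal curve (mean 0, sigma 1) from 0.0 to 0.5:

   double mean = 0.0, sigma = 1.0;
   double low = 0.0, high = 0.5;
   int num_points = 100;
   double step = (high - low) / num_points;
   double integral = 0.0;
   double x = low;
   for (int i = 0; i < num_points; i++) {
       double t = x - mean;
       // point on the normal probability density at x
       double f_of_x = Math.exp( -(t * t) / (2 * sigma * sigma) ) /
                       (sigma * Math.sqrt( 2 * Math.PI ));
       integral = integral + step * f_of_x;   // area of one rectangle
       x = x + step;
   }
   // integral is approximately 0.19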

@param info This object encapsulates the mean and standard deviation @param low Start of the integral @param high End of the integral @param num_points Number of points to calculate (should be even) */ private double normal_interval(bell_info info, double low, double high, int num_points ) { double integral = 0; if (info != null) { double s = info.sigma; // calculate 1/(s * sqrt(2 * pi)), where s is the stddev double sigmaSqrt = 1.0 / (s * (Math.sqrt(2 * Math.PI))); double oneOverTwoSigmaSqrd = 1.0 / (2 * s * s); double range = high - low; double step = range / num_points; double x = low; double f_of_x; double area; double t; for (int i = 0; i < num_points-1; i++) { t = x - info.mean; f_of_x = sigmaSqrt * Math.exp( -(oneOverTwoSigmaSqrd * t * t) ); area = step * f_of_x; // area of one rectangle in the interval integral = integral + area; // sum of the rectangles x = x + step; } // for } return integral; } // normal_interval /**

Output a gnuplot formatted histogram of the area under a normal curve through the range m.low to m.high based on the mean and standard deviation of the values in the array v. The number of bins used in the histogram is num_bins

@param prStr PrintWriter object for output file @param num_bins Number of histogram bins @param m An object encapsulating the high and low values of v @param v An array of doubles from which the mean and standard deviation is calculated. */ private void normal_curve(PrintWriter prStr, int num_bins, low_high m, double v[] ) { // calculate the mean and standard deviation bell_info info = stddev( v ); int N = v.length; int points_per_bin = N/num_bins; double range = m.high - m.low; double step = range / (double)num_bins; double start = m.low; double end = start + step; double area; double total_area = 0; prStr.println("#"); prStr.println("# histogram of normal curve"); prStr.println("# mean = " + info.mean + ", std. dev. = " + info.sigma ); prStr.println("#"); for (int i = 0; i < num_bins; i++) { area = normal_interval( info, start, end, points_per_bin ); total_area = total_area + area; prStr.println(" " + start + " " + area ); start = end; end = start + step; } // for prStr.println("#"); prStr.println("# Total area under curve = " + total_area ); prStr.println("#"); } // normal_curve /**

Write out a histogram for the Haar coefficient frequency spectrum in gnuplot format.

@param prStr PrintWriter object for output file @param num_bins Number of histogram bins @param m An object encapsulating the high and low values from v @param v The array of doubles to histogram */ private void histogram_coef(PrintWriter prStr, int num_bins, low_high m, double v[] ) { if (prStr != null && v != null) { prStr.println("#"); prStr.println("# Histogram of Haar coefficients"); prStr.println("#"); int len = v.length; double range = m.high - m.low; double step = range / (double)num_bins; double start = m.low; double end = start + step; int count = 0; int i = 0; double area = 0; while (i < len && end <= m.high ) { if (v[i] >= start && v[i] < end) { count++; i++; } else { double percent = (double)count / (double)len; area = area + percent; prStr.println(" " + start + " " + percent ); start = end; end = end + step; count = 0; } } // for prStr.println("#"); prStr.println("# Total area under curve = " + area ); prStr.println("#"); } } // histogram_coef /**

plot_freq

Generate histograms for a set of coefficients (passed in the argument v). Generate a separate histogram for a normal curve. Both histograms have the same number of bins and the same scale.

The histograms are written to separate files in gnuplot format. Different files are needed (as far as I can tell) to allow different colored lines for the coefficient histogram and the normal plot. The file name reflects the number of points in the coefficient spectrum.

*/ private void plot_freq( double v[] ) throws IOException { if (v != null) { String file_name = "coef" + v.length; PrintWriter prStr = OpenFile( file_name ); if (prStr != null) { final int num_bins = 32; qsort.sort( v ); low_high m = new low_high(v[0], v[v.length-1]); histogram_coef( prStr, num_bins, m, v ); prStr.close(); file_name = "normal" + v.length; prStr = OpenFile( file_name ); if (prStr != null) { normal_curve( prStr, num_bins, m, v ); prStr.close(); } else { IOException ioerr = new IOException(); throw ioerr; } } else { IOException ioerr = new IOException(); throw ioerr; } } } // plot_freq /**

This function is passed an ordered set of Haar wavelet coefficients. For each frequency of coefficients a graph will be generated, in gnuplot format, that plots the ordered Haar coefficients as a histogram. A gaussian (normal) curve with the same mean and standard deviation will also be plotted for comparison.

The histogram for the coefficients is generated by counting the number of coefficients that fall within a particular bin range and then dividing by the total number of coefficients. This results in a histogram where all bins add up to one (or to put it another way, a curve whose area is one).

The standard formula for a normal curve results in a curve showing the probability profile. To convert this curve to the same scale as the coefficient histogram, the area under the curve is integrated over the range of each bin (corresponding to the coefficient histogram bins). The area under the normal curve is one, resulting in the same scale.

The size of the coefficient array must be a power of two. When the Haar coefficients are ordered (see inplace_haar) the coefficient frequencies are the component powers of two. For example, if the array length is 512, there will be 256 coefficients in the highest frequency band, from index 256 to 511. The next frequency set will be 128 in length, from 128 to 255, the next will be 64 in length, from 64 to 127, etc...

As the number of coefficients decreases, the histograms become less meaningful. This function only graphs the coefficient spectrum down to 64 coefficients.
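
A minimal usage sketch (not part of the original class; it assumes an ordered coefficient array produced by inplace_haar.wavelet_calc() followed by order(), and that the parent plot class provides a default constructor):

   // coef: ordered Haar coefficients, length a power of two (e.g. 512)
   bell_curves curves = new bell_curves();
   curves.plot_curves( coef );
   // writes gnuplot files coef256/normal256, coef128/normal128, ... down to 64 points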

*/ public void plot_curves( double coef[] ) { if (coef != null) { final int min_coef = 64; int len = coef.length; int end = len; int start = len >> 1; while (start >= min_coef) { double v[] = new double[ start ]; int ix = 0; for (int i = start; i < end; i++) { v[ix] = coef[i]; ix++; } try { plot_freq( v ); } catch (Exception e) { break; // exit the while loop } end = start; start = end >> 1; } // for } } // plot_curves } // curve_plot wavelets/wavelet_util/binary.java100644 1040 1001 2215 7303100756 15747 0ustar everyone package wavelet_util; /** * Calculate power and log functions using fast binary operations. */ public class binary { /** * Calculate 2^n where n >= 0 @param n a value between 0..31 @return power2 returns 2^n */ public static int power2( byte n) { int rslt = 0x1 << (n & 0x1f); return rslt; } // power2 /** *

Calculate floor( log2( val ) ), where val > 0

The log function is undefined for 0. @param val a positive value @return floor( log2( val ) ) */ public static byte log2(int val ) { byte log; for (log = 0; val > 0; log++, val = val >> 1) ; log--; return log; } // log2 /** nearestPower2

Given a value "val", where val > 0, nearestPower2 returns the power of 2 that is less than or equal to val. */ public static int nearestPower2( int val ) { byte l = log2( val ); int power = power2( l ); return power; } // nearestPower2 } // class binary wavelets/wavelet_util/coef_spectrum.java100644 1040 1001 10327 7321421413 17330 0ustar everyone package wavelet_util; import wavelets.*; import java.io.*; /**

After the wavelet transform is calculated, regenerate the time series with a given spectrum set to zero, or with all but a given spectrum set to zero. The plots are generated from the highest frequency spectrum to the lower frequency spectrums. The highest frequency spectrum is left out of later plots since this spectrum contains most of the noise.

Wavelets allow a time series to be examined by filtering the component spectrum. For example, the Haar wavelet transform can be calculated and the highest frequency spectrum of coefficients can be set to zero. When the reverse Haar transform is calculated, the time series will be regenerated without this spectrum.

Wavelets can also be used to look at a single spectrum in isolation. This can be done by setting all but the one spectrum to zero and then regenerating the time series. This will result in a time series showing the contribution of that spectrum.
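
A minimal usage sketch (not part of the original class; it assumes an ordered coefficient array from inplace_haar, as in the other test programs, and a default constructor inherited from plot):

   // coef: ordered Haar coefficients; coef[0] holds the overall average
   coef_spectrum spectrum = new coef_spectrum();
   spectrum.filter_one_spectrum( coef );   // writes files all_but_256, all_but_128, ...
   spectrum.only_one_spectrum( coef );     // writes files only_256, only_128, ...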

*/ public class coef_spectrum extends plot { final private int min_spectrum = 64; String class_name() { return "coef_spectrum"; } /** Regenerate the time series from the coefficient array and output the time series to a file. */ private void output_time_series( String file_name, double coef[] ) { PrintWriter prStr = OpenFile( file_name ); if (prStr != null) { wavelets.inplace_haar haar = new wavelets.inplace_haar(); haar.setWavefx( coef ); haar.setIsOrdered(); haar.inverse(); // calculate the inverse Haar transform // the time series has been regenerated in the coef array for (int i = 0; i < coef.length; i++) { prStr.println( i + " " + coef[i] ); } prStr.close(); } } // output_time_series /** Make a copy of the coefficient array. If limit is greater than zero copy up to limit, otherwise copy the entire array. */ private double[] copy_coef( double coef[], int limit ) { double new_array[] = null; if (coef != null) { new_array = new double[ coef.length ]; int end = coef.length; if (limit > 0) end = limit; for (int i = 0; i < end; i++) { new_array[i] = coef[i]; } } return new_array; } // copy_coef /**

Moving from the high frequency coefficient spectrum to the lower frequency spectrum, set each spectrum to zero and output the regenerated time series to a file.

The highest frequency spectrum contains most of the noise, so when subsequent spectrums are set to zero, the highest frequency spectrum is not included.

*/ public void filter_one_spectrum( double coef[] ) { int end = coef.length; int start = end >> 1; int noise_start = start; int limit = 0; while (start >= min_spectrum) { double new_array[] = copy_coef( coef, limit ); // set the spectrum to zero for (int i = start; i < end; i++) { new_array[i] = 0; } String file_name = "all_but_" + start; output_time_series( file_name, new_array ); end = start; start = end >> 1; limit = noise_start; } } // filter_one_spectrum /**

Moving from high frequency to lower frequency, regenerate the time series from only one spectrum.

Note that coef[0] contains the time series average and must exist for all coefficient arrays in order to regenerate the time series.

*/ public void only_one_spectrum( double coef[] ) { int end = coef.length; int start = end >> 1; while (start >= min_spectrum) { double new_array[] = new double[ coef.length ]; for (int i = 0; i < min_spectrum; i++) { new_array[i] = coef[i]; } // Copy spectrum for (int i = start; i < end; i++) { new_array[i] = coef[i]; } String file_name = "only_" + start; output_time_series( file_name, new_array ); end = start; start = end >> 1; } } // only_one_spectrum } // coef_spectrum wavelets/wavelet_util/gnuplot3D.java100644 1040 1001 7340 7315774761 16356 0ustar everyone package wavelet_util; import java.io.*; /**

Define the class gnuplot3D for the wavelet_util package.

The class outputs a Haar wavelet spectrum array in a format that can be read by gnuplot for 3D plotting. This function can be used to plot either Haar wavelet spectrums or Haar wavelet coefficients.

The constructor is given an array of Haar spectrum values and a file name. The Haar spectrum values will be written out to the file so that they can be graphed.

The length of the array must be 2^N. The lengths of the spectrums are 2^(N-1), ..., 2^0. If the original data was

{32.0, 10.0, 20.0, 38.0, 37.0, 28.0, 38.0, 34.0, 
 18.0, 24.0, 18.0,  9.0, 23.0, 24.0, 28.0, 34.0}

The Haar spectrum will be:

0.0
25.9375
29.625 22.25
25.0 34.25 17.25 27.25
21.0 29.0 32.5 36.0 21.0 13.5 23.5 31.0

If the original data length was 2^N, then the total length of the spectrum data will be 2^N - 1, so the first element is zeroed out in the case of a Haar spectrum. In the case of the wavelet coefficients, the first value will be the average for the entire sample. In either case this value will not be output.

The plot used to display the Haar wavelet spectrums is modeled after the plots shown in Robi Polikar's Wavelet Tutorial, Part III. Here the x-axis represents the offset in the data. The y-axis represents the width of the Haar window (which will be a power of two) and the z-axis represents the spectrum value.

In order for gnuplot to display a 3D surface each line must have the same number of points. The wavelet spectrum is graphed over the original range. The first spectrum repeats two values for each average or coefficient calculated. The second spectrum repeats each value four times, the third spectrum eight times, etc...

*/ public class gnuplot3D extends plot { String class_name() { return "gnuplot3D"; } /** Output a Haar spectrum where the x-axis is the sample value number, the y-axis is the log2 of the window width and the z-axis is the value (e.g., average or average difference). */ private void outputSpectrum( PrintWriter prStr, double[] values, int end, int windowWidth ) { if (end > 1) { if (windowWidth > 1) prStr.println(); int l = binary.log2( windowWidth ); int windowStart = 0; int windowEnd = windowWidth; int start = end >> 1; for (int i = start; i < end; i++) { for (int j = windowStart; j < windowEnd; j++) { prStr.println( j + " " + l + " " + values[ i ] ); } windowStart = windowEnd; windowEnd = windowEnd + windowWidth; } // for end = start; windowWidth = windowWidth << 1; // windowWidth = windowWidth * 2 outputSpectrum( prStr, values, end, windowWidth ); } } // outputSpectrum public gnuplot3D( double[] values, String path ) { PrintWriter prStr = OpenFile( path ); if (prStr != null) { prStr.println("#"); prStr.println("# Wavelet spectrum data formatted for gnuplot."); prStr.println("# To plot, use the command \"splot ''\""); prStr.println("# were is the file name."); prStr.println("#"); prStr.println("# {x, y, z} = point number, log2(windowWidth), value"); prStr.println("#"); int len = binary.nearestPower2( values.length ); int windowWidth = 2; outputSpectrum( prStr, values, len, windowWidth ); prStr.close(); } } // gnuplot3D } // gnuplot3D wavelets/wavelet_util/noise_filter.java100644 1040 1001 44674 7321415017 17173 0ustar everyone package wavelet_util; import java.util.Vector; import sort.*; import wavelets.*; import java.io.*; /**

The objective in filtering is to remove noise while keeping the features that are interesting.

Wavelets allow a time series to be examined at various resolutions. This can be a powerful tool in filtering out noise. This class supports the subtraction of gaussian noise from the time series.

The identification of noise is complex and I have not found any material that I could understand which discussed noise identification in the context of wavelets. I did find some material that has been difficult and frustrating. In particular Image Processing and Data Analysis: the multiscale approach by Starck, Murtagh and Bijaoui.

If the price of a stock follows a random walk, its price will be distributed in a bell (gaussian) curve. This is one way of stating the concept from financial theory that the daily return is normally distributed (here daily return is defined as the difference between yesterday's close price and today's close price). Movement outside the bounds of the curve may represent something other than a random walk and so, in theory, might be interesting.

At least in the case of the single test case used in developing this code (Applied Materials, symbol: AMAT), the coefficient distribution in the highest frequency is almost a perfect normal curve. That is, the mean is close to zero and the standard deviation is close to one. The area under this curve is very close to one. This resolution approximates the daily return. At lower frequencies the mean moves away from zero and the standard deviation increases. This results in a flattened curve, whose area in the coefficient range is increasingly less than one.

The code in this class subtracts the normal curve from the coefficients at each frequency up to some minimum. This leaves only the coefficients above the curve which are used to regenerate the time series (without the noise, in theory). This filter removes 50 to 60 percent of the coefficients.

It's probably worth mentioning that there are other kinds of noise, most notably Poisson noise. In theory daily data tends to show gaussian noise, while intraday data would show Poisson noise. Intraday Poisson noise would result from the random arrival and size of orders.

This class has two public methods:

  1. filter_time_series, which is passed a file name and a time series

  2. gaussian_filter, which is passed a set of Haar coefficient spectrums and an array allocated for the noise values. The noise array will be the same size as the coefficient array.

*/ public class noise_filter extends plot { String class_name() { return "noise_filter"; } /**

      The point class represents a coefficient value so that it can be sorted for histogramming and then resorted back into the original ordering (e.g., sorted by value and then sorted by index).

      */ private class point { point(int i, double v) { index = i; val = v; } public int index; // index in original array public double val; // coefficient value } // point /**

      A histogram bin

      For a histogram bin b[i], the range of values is b[i].start to b[i+1].start.

      The vector object vals stores references to the point objects which fall in the bin range.

      The number of values in the bin is vals.size()

      */ private class bin { bin( double s ) { start = s; } public double start; public Vector vals = new Vector(); } // bin /** Bell curve info: mean, sigma (the standard deviation) */ private class bell_info { public bell_info() {} public bell_info(double m, double s) { mean = m; sigma = s; } public double mean; public double sigma; } // bell_info /**

      Build a histogram from the sorted data in the pointz array. The histogram is constructed by appending a point object to the bin vals Vector if the value of the point is between b[i].start and b[i].start + step.

      */
      private void histogram( bin binz[], point pointz[] ) {
          double step = binz[1].start - binz[0].start;
          double start = binz[0].start;
          double end = binz[1].start;
          int len = pointz.length;
          int i = 0;
          int ix = 0;

          while (i < len && ix < binz.length) {
              if (pointz[i].val >= start && pointz[i].val < end) {
                  binz[ix].vals.addElement( pointz[i] );
                  i++;
              }
              else {
                  ix++;
                  start = end;
                  end = end + step;
              }
          } // while
      } // histogram

      /** Sort an array of point objects by the index field. */
      private class sort_by_index extends generic_sort {
          /**
             if (a.index == b.index) return 0
             if (a.index <  b.index) return -1
             if (a.index >  b.index) return 1
          */
          protected int compare( Object a, Object b ) {
              int rslt = 0;
              point t_a = (point)a;
              point t_b = (point)b;

              if (t_a.index < t_b.index)
                  rslt = -1;
              else if (t_a.index > t_b.index)
                  rslt = 1;
              return rslt;
          } // compare
      } // sort_by_index

      /** Sort an array of point objects by the val field. */
      private class sort_by_val extends generic_sort {
          /**
             if (a.val == b.val) return 0
             if (a.val <  b.val) return -1
             if (a.val >  b.val) return 1
          */
          protected int compare( Object a, Object b ) {
              int rslt = 0;
              point t_a = (point)a;
              point t_b = (point)b;

              if (t_a.val < t_b.val)
                  rslt = -1;
              else if (t_a.val > t_b.val)
                  rslt = 1;
              return rslt;
          } // compare
      } // sort_by_val

      /**
         Allocate an array of histogram bins that is num_bins in length.
         Initialize each bin's start value from low and high.
      */
      private bin[] alloc_bins( int num_bins, double low, double high ) {
          double range = high - low;
          double step = range / (double)num_bins;
          double start = low;
          bin binz[] = new bin[ num_bins ];

          for (int i = 0; i < num_bins; i++) {
              binz[i] = new bin( start );
              start = start + step;
          }
          return binz;
      } // alloc_bins

      /**

      Calculate the histogram of the coefficients using num_bins histogram bins

      The Haar coefficients are stored in point objects, which consist of the coefficient value and its index in the coefficient array.

      To calculate the histogram, the pointz array is sorted by value. After it is histogrammed it is resorted by index to return the original ordering.

      */
      private bin[] calc_histo( point pointz[], int num_bins ) {
          // sort by value
          sort_by_val by_val = new sort_by_val();
          by_val.sort( pointz );

          int len = pointz.length;
          double low = pointz[0].val;
          double high = pointz[len-1].val;

          bin binz[] = alloc_bins( num_bins, low, high );
          histogram( binz, pointz );

          // return the array to its original order by sorting by index
          sort_by_index by_index = new sort_by_index();
          by_index.sort( pointz );

          return binz;
      } // calc_histo

      /**

      Allocate and initialize an array of point objects. The size of the array is end - start. Each point object in the array is initialized with its index in the coef array and the corresponding Haar coefficient.

      Since the allocation code has to iterate through the coefficient spectrum, the mean and standard deviation are also calculated here to avoid an extra iteration. These values are returned in the bell_info object.
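      For reference, the two quantities calculated are the sample mean and the sample standard deviation (with the usual n - 1 divisor), where n is the spectrum size and x[i] are the coefficients:

             mean  = sum( x[i] ) / n
             sigma = sqrt( sum( (x[i] - mean)^2 ) / (n - 1) )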

      */
      private point[] alloc_points( double coef[], int start, int end, bell_info info ) {
          int size = end - start;
          point pointz[] = new point[ size ];
          double sum = 0;
          int ix = start;

          for (int i = 0; i < size; i++) {
              // store the index into the full coefficient array so that the
              // noise values extracted later land at the matching positions
              // in the noise array
              pointz[i] = new point( ix, coef[ix] );
              sum = sum + coef[ix];
              ix++;
          }
          double mean = sum / (double)size;

          // now calculate the standard deviation
          double stdDevSum = 0;
          double x;
          for (int i = 0; i < size; i++) {
              x = pointz[i].val - mean;
              stdDevSum = stdDevSum + (x * x);
          }
          double sigmaSquared = stdDevSum / (size-1);
          double sigma = Math.sqrt( sigmaSquared );

          info.mean = mean;
          info.sigma = sigma;
          return pointz;
      } // alloc_points

      /**

      normal_interval

      Numerically integrate the normal curve with mean info.mean and standard deviation info.sigma over the range low to high.

      The normal curve equation that is integrated is:

             f(y) = (1 / (s * sqrt(2 * pi))) * e^( -(1 / (2 * s^2)) * (y - u)^2 )
           

      Where u is the mean and s is the standard deviation.

      The area under the section of this curve from low to high is returned as the function result.

      The normal curve is a probability density, so the area under any section of the curve is a probability between zero and one. The total area under a normal curve is one (for example, under a curve with a mean of zero and a standard deviation of one).

      The integral is calculated in a dumb fashion (i.e., we're not using anything fancy like Simpson's rule). The area in the interval x[i] to x[i+1] is

           area = (x[i+1] - x[i]) * g(x[i])
           

      where g(x[i]) is the value of the normal curve probability density at x[i].
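      As a quick sanity check (an added illustration, not part of the original code; it would have to run inside noise_filter, since normal_interval and bell_info are private), the area under a standard normal curve over a wide interval should come out very close to one:

             bell_info std = new bell_info( 0.0, 1.0 );
             double area = normal_interval( std, -4.0, 4.0, 1000 );
             System.out.println( "area over [-4, 4] = " + area ); // prints a value close to 1.0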

      @param info This object encapsulates the mean and standard deviation
      @param low Start of the integral
      @param high End of the integral
      @param num_points Number of points to calculate (should be even)
      */
      private double normal_interval( bell_info info, double low, double high, int num_points ) {
          double integral = 0;

          if (info != null) {
              double s = info.sigma;
              // calculate 1/(s * sqrt(2 * pi)), where s is the stddev
              double sigmaSqrt = 1.0 / (s * (Math.sqrt(2 * Math.PI)));
              double oneOverTwoSigmaSqrd = 1.0 / (2 * s * s);
              double range = high - low;
              double step = range / num_points;
              double x = low;
              double f_of_x;
              double area;
              double t;

              // sum num_points rectangles of width step covering [low, high)
              for (int i = 0; i < num_points; i++) {
                  t = x - info.mean;
                  f_of_x = sigmaSqrt * Math.exp( -(oneOverTwoSigmaSqrd * t * t) );
                  area = step * f_of_x;       // area of one rectangle in the interval
                  integral = integral + area; // sum of the rectangles
                  x = x + step;
              } // for
          }
          return integral;
      } // normal_interval

      /**

      Set num_zero values in the histogram bin b to zero or, if the number of values in the bin is less than num_zero, set all values in the bin to zero.

      The num_zero argument is derived from the area under the normal curve over the histogram bin's interval. This area is a fraction of the total curve area; multiplying it by the total number of coefficient points gives num_zero (for example, if the area over a bin's interval is 0.05 and there are 512 coefficients, num_zero is 25).

      The noise coefficients are preserved (returned) in the noise array argument.

      */
      private void zero_points( bin b, int num_zero, double noise[] ) {
          int num = b.vals.size();
          int end = num_zero;
          if (end > num)
              end = num;

          point p;
          for (int i = 0; i < end; i++) {
              p = (point)b.vals.elementAt( i );
              noise[ p.index ] = p.val; // preserve the filtered value as noise
              p.val = 0;                // zero the coefficient
          }
      } // zero_points

      /**

      Subtract the Gaussian (or normal) curve from the histogram of the coefficients. This is done by integrating the Gaussian curve over the range of each bin and converting that area into a number of points. If the number of items in the bin is less than or equal to that number, all items in the bin are set to zero. If the number of items in the bin is greater, then only that number of bin items is set to zero.

      The area under any section of a normal curve is less than or equal to one, so the area returned by normal_interval is a fraction of the total area. This fraction is multiplied by the total number of coefficients to get the number of points to zero in each bin.

      The function returns the number of coefficients that are set to zero (i.e., the number of coefficients that fell within the Gaussian curve). These are the noise coefficients, and they are returned in the noise argument.

      */
      private int subtract_gauss_curve( bin binz[], bell_info info, int total_points, double noise[] ) {
          int points_in_interval = total_points / binz.length;
          double start = binz[0].start;
          double end = binz[1].start;
          double step = end - start;
          double percent;
          int num_points;
          int total_zeroed = 0;

          for (int i = 0; i < binz.length; i++) {
              percent = normal_interval( info, start, end, points_in_interval );
              num_points = (int)(percent * (double)total_points);
              total_zeroed = total_zeroed + num_points;
              if (num_points > 0) {
                  zero_points( binz[i], num_points, noise );
              }
              start = end;
              end = end + step;
          } // for
          return total_zeroed;
      } // subtract_gauss_curve

      /**

      This function is passed the section of the Haar coefficient array that corresponds to a single spectrum. It compares this spectrum to a Gaussian curve and zeros out the coefficients that fall within the Gaussian curve.

      The function returns the number of points filtered out as the function result. The noise spectrum is also returned in the noise argument.

      */
      private int filter_spectrum( double coef[], int start, int end, double noise[] ) {
          final int num_bins = 32;
          int num_filtered;

          bell_info info = new bell_info();
          point pointz[] = alloc_points( coef, start, end, info );
          bin binz[] = calc_histo( pointz, num_bins );

          num_filtered = subtract_gauss_curve( binz, info, pointz.length, noise );

          // copy filtered coefficients back into the coefficient array
          int ix = start;
          for (int i = 0; i < pointz.length; i++) {
              coef[ix] = pointz[i].val;
              ix++;
          }
          return num_filtered;
      } // filter_spectrum

      /**
          Normalize the noise array to zero by subtracting the smallest
          value from all points.
      */
      private void normalize_to_zero( double noise[] ) {
          double min = noise[0];
          for (int i = 1; i < noise.length; i++) {
              if (min > noise[i])
                  min = noise[i];
          } // for

          // normalize
          for (int i = 0; i < noise.length; i++) {
              noise[i] = noise[i] - min;
          }
      } // normalize_to_zero

      /**

      This function is passed the set of Haar wavelet coefficients that results from the Haar wavelet transform. It applies a Gaussian noise filter to each frequency spectrum, zeroing out coefficients that fall within a Gaussian curve. This alters the input data (the coef array). For example, for a 512-point series the spectra filtered are coef[256..511], coef[128..255] and coef[64..127]; filtering stops once a spectrum would be shorter than the minimum size of 64.

      The coef argument is the input argument and contains the coefficients. The noise argument is an output argument and contains the coefficients that have been filtered out. This allows a noise spectrum to be rebuilt.

      */
      public void gaussian_filter( double coef[], double noise[] ) {
          final int min_size = 64; // minimum spectrum size
          int total_filtered = 0;
          int num_filtered;
          int end = coef.length;
          int start = end >> 1;

          // filter each spectrum, starting with the largest (highest
          // frequency) one in the top half of the array and halving down
          // to spectra of length min_size
          while (start >= min_size) {
              num_filtered = filter_spectrum( coef, start, end, noise );
              total_filtered = total_filtered + num_filtered;
              end = start;
              start = end >> 1;
          }

          // Note that coef[0] is the average across the
          // time series. This value is needed to regenerate
          // the noise spectrum time series.
          noise[0] = coef[0];

          System.out.println("gaussian_filter: total points filtered out = " + total_filtered );
      } // gaussian_filter

      /**
          Calculate the Haar transform on the time series (whose length must
          be a power of two) and filter it. Then calculate the inverse
          transform and write the result to a file whose name is file_name.
          A noise spectrum is written to file_name_noise.
      */
      public void filter_time_series( String file_name, double ts[] ) {
          double noise[] = new double[ ts.length ];
          wavelets.inplace_haar haar = new wavelets.inplace_haar();

          // forward Haar transform, ordered by frequency
          haar.wavelet_calc( ts );
          haar.order();

          // filter out the Gaussian noise, then invert to get the
          // filtered time series
          gaussian_filter( ts, noise );
          haar.inverse();

          PrintWriter prStr = OpenFile( file_name );
          if (prStr != null) {
              for (int i = 0; i < ts.length; i++) {
                  prStr.println( i + " " + ts[i] );
              }
              prStr.close();
          }

          if (noise != null) {
              // calculate the inverse Haar function for the noise
              haar.setWavefx( noise );
              haar.setIsOrdered();
              haar.inverse();
              normalize_to_zero( noise );

              // write the noise spectrum out to a file
              prStr = OpenFile( file_name + "_noise" );
              if (prStr != null) {
                  for (int i = 0; i < noise.length; i++) {
                      prStr.println( i + " " + noise[i] );
                  }
                  prStr.close();
              }
          }
      } // filter_time_series

      } // noise_filter

wavelets/wavelet_util/plot.java100644 1040 1001 763 7317254147 15430 0ustar everyone
package wavelet_util;

import java.io.*;

public abstract class plot {

    abstract String class_name();

    /** Open path for writing, returning null (after printing a message) on failure. */
    public PrintWriter OpenFile( String path ) {
        PrintWriter prStr = null;
        try {
            FileOutputStream outStr = new FileOutputStream( path );
            prStr = new PrintWriter( outStr );
        }
        catch (Exception e) {
            System.out.println( class_name() + ": file name = " + path + ", " + e.getMessage() );
        }
        return prStr;
    } // OpenFile
} // plot