Here's what seems to work. I load the data in an ajax call with the response type "arraybuffer". Otherwise, the response ends up being a string and it is a mess to work with. Then I convert to a 16-bit array. Then I convert that to a Float32 array in the way that works with the WAV encoding. I also needed to throw away the WAV's header, and some metadata at the end of it.
// These are ready to be copied into an AudioBufferSourceNode's channel data.
// Filled in asynchronously by the XHR onload handler further down in this file.
var theWavDataInFloat32;
// Converts an array of float samples in [-1, 1] into 16-bit PCM words,
// stored as their unsigned (two's-complement) bit patterns in a Uint16Array.
// The first startIndex input items are skipped.
// Asymmetric scaling (0x8000 for negatives, 0x7FFF for positives) lets both
// -1 and +1 map onto the signed 16-bit range without overflow.
// Bug fix: the original ignored startIndex when reading, and wrote past the
// end of the (shorter) output array whenever startIndex > 0.
function floatTo16Bit(inputArray, startIndex){
  var output = new Uint16Array(inputArray.length - startIndex);
  for (var i = startIndex; i < inputArray.length; i++){
    // Clamp out-of-range samples before scaling.
    var s = Math.max(-1, Math.min(1, inputArray[i]));
    output[i - startIndex] = s < 0 ? s * 0x8000 : s * 0x7FFF;
  }
  return output;
}
// This is passed an unsigned 16-bit integer array (PCM words). It is converted
// to a 32-bit float array with samples in [-1, 1].
// The first startIndex items are skipped, and exactly 'length' items are
// converted, so the returned Float32Array has 'length' elements.
// Bug fixes: the original wrote output[i] with i starting at startIndex
// (leaving leading zeros and overrunning the output), and looped i < length
// instead of converting 'length' items starting at startIndex (dropping the
// tail of the data whenever startIndex > 0).
function int16ToFloat32(inputArray, startIndex, length) {
  var output = new Float32Array(length);
  for (var i = 0; i < length; i++) {
    var int = inputArray[startIndex + i];
    // If the high bit is on, this is a two's-complement negative number
    // and actually counts backwards from 0x10000.
    output[i] = (int >= 0x8000) ? -(0x10000 - int) / 0x8000 : int / 0x7FFF;
  }
  return output;
}
// TEST
// Round-trip sanity check: unsigned 16-bit words -> float32 -> back to 16-bit.
// 65424 and 33000 and 50000 have the high bit set, i.e. they are negative
// when read as signed 16-bit values.
var data = [ 65424, 18, 0, 32700, 33000, 1000, 50000 ];
var testDataInt = new Uint16Array(data);
var testDataFloat = int16ToFloat32(testDataInt, 0, data.length);
var testDataInt2 = floatTo16Bit(testDataFloat, 0);
// At this point testDataInt2 should be pretty close to the original data array (there is a little rounding.)
var xhr = new XMLHttpRequest();
xhr.open('GET', '/my-sound.wav', true);
// 'arraybuffer' gives raw bytes; the default response type would be a string.
xhr.responseType = 'arraybuffer';
xhr.onload = function(e) {
if (this.status === 200) {
// This retrieves the entire wav file. We're only interested in the data portion.
// At the beginning is 44 bytes (22 words) of header, and at the end is some metadata about the file.
// The actual data length is held in bytes 40 - 43 (a 32-bit little-endian value).
// NOTE(review): this assumes the canonical 44-byte WAV header (16-bit PCM,
// 'data' chunk starting at byte 44). Files with extra chunks (LIST, fact, ...)
// would need real RIFF chunk parsing — confirm against the actual files served.
var data = new Uint16Array(this.response);
var length = (data[20] + data[21] * 0x10000) / 2; // The length is in bytes, but the array is 16 bits, so divide by 2.
theWavDataInFloat32 = int16ToFloat32(data, 22, length);
}
};
xhr.send();
Answer from Paulie on Stack Overflow: Here's what seems to work. I load the data in an ajax call with the response type "arraybuffer". Otherwise, the response ends up being a string and it is a mess to work with. Then I convert to a 16-bit array. Then I convert that to a Float32 array in the way that works with the WAV encoding. I also needed to throw away the WAV's header, and some metadata at the end of it.
// These are ready to be copied into an AudioBufferSourceNode's channel data.
// Filled in asynchronously by the XHR onload handler further down in this file.
var theWavDataInFloat32;
// Converts an array of float samples in [-1, 1] into 16-bit PCM words,
// stored as their unsigned (two's-complement) bit patterns in a Uint16Array.
// The first startIndex input items are skipped.
// Asymmetric scaling (0x8000 for negatives, 0x7FFF for positives) lets both
// -1 and +1 map onto the signed 16-bit range without overflow.
// Bug fix: the original ignored startIndex when reading, and wrote past the
// end of the (shorter) output array whenever startIndex > 0.
function floatTo16Bit(inputArray, startIndex){
  var output = new Uint16Array(inputArray.length - startIndex);
  for (var i = startIndex; i < inputArray.length; i++){
    // Clamp out-of-range samples before scaling.
    var s = Math.max(-1, Math.min(1, inputArray[i]));
    output[i - startIndex] = s < 0 ? s * 0x8000 : s * 0x7FFF;
  }
  return output;
}
// This is passed an unsigned 16-bit integer array (PCM words). It is converted
// to a 32-bit float array with samples in [-1, 1].
// The first startIndex items are skipped, and exactly 'length' items are
// converted, so the returned Float32Array has 'length' elements.
// Bug fixes: the original wrote output[i] with i starting at startIndex
// (leaving leading zeros and overrunning the output), and looped i < length
// instead of converting 'length' items starting at startIndex (dropping the
// tail of the data whenever startIndex > 0).
function int16ToFloat32(inputArray, startIndex, length) {
  var output = new Float32Array(length);
  for (var i = 0; i < length; i++) {
    var int = inputArray[startIndex + i];
    // If the high bit is on, this is a two's-complement negative number
    // and actually counts backwards from 0x10000.
    output[i] = (int >= 0x8000) ? -(0x10000 - int) / 0x8000 : int / 0x7FFF;
  }
  return output;
}
// TEST
// Round-trip sanity check: unsigned 16-bit words -> float32 -> back to 16-bit.
// 65424 and 33000 and 50000 have the high bit set, i.e. they are negative
// when read as signed 16-bit values.
var data = [ 65424, 18, 0, 32700, 33000, 1000, 50000 ];
var testDataInt = new Uint16Array(data);
var testDataFloat = int16ToFloat32(testDataInt, 0, data.length);
var testDataInt2 = floatTo16Bit(testDataFloat, 0);
// At this point testDataInt2 should be pretty close to the original data array (there is a little rounding.)
var xhr = new XMLHttpRequest();
xhr.open('GET', '/my-sound.wav', true);
// 'arraybuffer' gives raw bytes; the default response type would be a string.
xhr.responseType = 'arraybuffer';
xhr.onload = function(e) {
if (this.status === 200) {
// This retrieves the entire wav file. We're only interested in the data portion.
// At the beginning is 44 bytes (22 words) of header, and at the end is some metadata about the file.
// The actual data length is held in bytes 40 - 43 (a 32-bit little-endian value).
// NOTE(review): this assumes the canonical 44-byte WAV header (16-bit PCM,
// 'data' chunk starting at byte 44). Files with extra chunks (LIST, fact, ...)
// would need real RIFF chunk parsing — confirm against the actual files served.
var data = new Uint16Array(this.response);
var length = (data[20] + data[21] * 0x10000) / 2; // The length is in bytes, but the array is 16 bits, so divide by 2.
theWavDataInFloat32 = int16ToFloat32(data, 22, length);
}
};
xhr.send();
Each real value of the input array is clamped to the interval [-1, 1]:
- Math.min(1, x) gives x if x <= 1, and 1 otherwise
- Math.max(-1, y) gives y if y >= -1, and -1 otherwise
Then this real number between -1 and 1 is converted to a signed 16-bit integer.
Depending on whether it is positive or negative, it is multiplied by 32767 or 32768, and the cast then keeps only the integer part. This is equivalent to keeping only 16 significant bits of the binary representation.
The 16-bit integers are stored little-endian, two bytes each, one after the other in the buffer (with the offset advancing two by two).
For the reverse operation, simply read two consecutive bytes into an int16. To convert it back to a real number, distinguish the two cases by sign and divide by 32768 or 32767. Note that the round trip involves some loss of precision.
javascript - What is the type of the bytes of UInt32, Int32 and Float32? - Stack Overflow
floating point - Difference between floats and ints in Javascript? - Stack Overflow
javascript - Converting presentation of a buffer from int8 to float32 in JS - Stack Overflow
About the binary format of JavaScript TypedArray float32 - Stack Overflow
I emailed the developer of glMatrix and my answer below includes his comments (points 2 & 3):
Creating a new object is generally quicker with Array than Float32Array. The gain is significant for small arrays, but is less (environment dependent) with larger arrays.
Accessing data from a TypedArray (e.g. Float32Array) is often faster than from a normal array, which means that most array operations (aside from creating a new object) are faster with TypedArrays.
As also stated by @emidander, glMatrix was developed primarily for WebGL, which requires that vectors and matrices be passed as Float32Array. So, for a WebGL application, the potentially costly conversion from Array to Float32Array would need to be included in any performance measurement.
So, not surprisingly, the best choice is application dependent:
- If arrays are generally small, and/or the number of operations on them is low so that the constructor time is a significant proportion of the array's lifespan, use Array.
- If code readability is as important as performance, then use Array (i.e. use [] instead of a constructor).
- If arrays are very large and/or are used for many operations, then use a TypedArray.
- For WebGL applications (or other applications that would otherwise require a type conversion), use Float32Array (or another TypedArray).
I would assume that the glMatrix library uses Float32Array because it is primarily used in WebGL-applications, where matrices are represented as Float32Arrays (http://www.khronos.org/registry/webgl/specs/1.0/#5.14.10).
(A lot has changed since 2011 when this answer was posted - see updates below)
2019-June Update
BigInt has been out in V8 (Node.js and Chromium-based browsers) since May 2018. It should land in Firefox 68 - see the SpiderMonkey ticket. Also implemented in WebKit.
BigDecimal hasn't been implemented by any engine yet. Look at alternative library.
2015 Update
It's been over 4 years since I wrote this answer and the situation is much more complicated now.
Now we have:
- typed arrays
- asm.js
- emscripten
Soon we'll have:
- WebAssembly with the spec developed on GitHub
It means that the number of numeric types available in JavaScript will grow from just one:
- 64-bit floating point (the IEEE 754 double precision floating-point number - see: ECMA-262 Edition 5.1, Section 8.5 and ECMA-262 Edition 6.0, Section 6.1.6)
to at least the following in WebAssembly:
- 8-bit integer (signed and unsigned)
- 16-bit integer (signed and unsigned)
- 32-bit integer (signed and unsigned)
- 64-bit integer (signed and unsigned)
- 32-bit floating point
- 64-bit floating point
(Technically the internal representations of all integer types are unsigned at the lowest level but different operators can treat them as signed or unsigned, like e.g. int32.sdiv vs. int32.udiv etc.)
Those are available in typed arrays:
- 8-bit two's complement signed integer
- 8-bit unsigned integer
- 8-bit unsigned integer (clamped)
- 16-bit two's complement signed integer
- 16-bit unsigned integer
- 32-bit two's complement signed integer
- 32-bit unsigned integer
- 32-bit IEEE floating point number
- 64-bit IEEE floating point number
asm.js defines the following numeric types:
- int
- signed
- unsigned
- intish
- fixnum
- double
- double?
- float
- float?
- floatish
Original 2011 answer
There is only one number type in JavaScript – the IEEE 754 double precision floating-point number.
See those questions for some consequences of that fact:
- Avoiding problems with javascript weird decimal calculations
- Node giving strange output on the sum of particular float digits
- Javascript infinity object
Although there is only one type of number in Javascript many programmers like to show that their code works with floating point numbers as well as integers. The reason for showing the decimal point is for documentation.
var isNegative = number < 0 || number == 0 && 1 / number < 0;
This works exactly the same as in the Closure Library. But some programmers reading the code would think that it only worked with integers.
Addendum: I've recently come across an article by D. Baranovskiy who makes many criticisms of the Google Closure library and points out that “It’s a JavaScript library written by Java developers who clearly don’t get JavaScript.” He points out more examples of this type confusion, in color.js https://github.com/google/closure-library/blob/master/closure/goog/color/color.js
https://www.sitepoint.com/google-closure-how-not-to-write-javascript/
JavaScript's number type is IEEE-754 double-precision binary floating point; it doesn't have an integer type except temporarily during some math operations or as part of a typed array (Int32Array, for instance, or a Uint32Array if you mean unsigned). So you have two options:
1. Ensure that the number has a value that fits in a 32-bit int, even though it's still a number (floating point double). One way to do that is a bitwise OR operation with the value 0, because the bitwise operations in JavaScript convert their operands to 32-bit integers before doing the operation. `value = value | 0;` does a signed conversion using the specification's ToInt32 operation: -5 becomes -5, while 123456789123 becomes -1097262461 (yes, negative). Alternatively, `value = value >>> 0;` does an unsigned conversion using the spec's ToUint32: -5 becomes 4294967291, and 123456789123 becomes 3197704835.
2. Use an Int32Array or Uint32Array: `const a = new Int32Array(1); a[0] = value;` (use Uint32Array for unsigned, then use `a[0]`). Int32Array uses ToInt32, Uint32Array uses ToUint32. Note that any time you read `a[0]`, it will be converted back to a standard number (floating point double), but if you use the array itself, depending on what you use it for, it will get used as-is.
Note that there's a method that may seem like it's for doing this, but isn't: Math.fround. That doesn't convert to 32-bit int, it converts to 32-bit float (IEEE-754 single-precision floating point). So it isn't useful for this.
Well, the easiest way I found is using bitwise not ~.
This is the description from MDN:
The operands are converted to 32-bit integers and expressed by a series of bits (zeroes and ones).
So you can just type a double tilde (~~) to convert your numbers. Here are some examples:
~~1 // 1
~~-1 // -1
~~5.05 // 5
~~-5.05 // -5
~~2147483647 // 2147483647
~~2147483648 // -2147483648
~~Math.pow(2, 32) // 0
The ES6 standard has Math.fround() which converts a float64 to float32 and then back again, effectively rounding the float to float32 precision. See this article for details.
Try this JavaScript function. It uses .toFixed(6) to round off the number to six decimal places.
// Formats a number (or numeric string) in uppercase exponential notation,
// rounding the mantissa to at most 6 decimal places (roughly single-precision
// accuracy). Inputs whose mantissa is already short enough, or that cannot be
// normalized to exponential form, are returned as the uppercased string.
// Idiom fixes vs. the original: strict checks via String.prototype.includes
// and Number.isNaN instead of indexOf == -1 and `num + "" == "NaN"`.
function ToSingle(s) {
  s = s.toString().toUpperCase();
  // Normalize to exponential form, e.g. "123.4" -> "1.234E+2".
  if (!s.includes("E")) s = parseFloat(s).toExponential().toUpperCase();
  if (!s.includes("E")) return s; // still no exponent (e.g. NaN) — give up
  var parts = s.split("E");
  var mantissa = parts[0];
  if (!mantissa.includes(".")) return s; // integer mantissa, nothing to round
  if (mantissa.split(".")[1].length < 7) return s; // already short enough
  var num = parseFloat(mantissa);
  if (Number.isNaN(num)) return s;
  return num.toFixed(6) + "E" + parts[1];
}
Replacing the while loop with this is the solution:
// Fragment from a WAV-export routine: converts float samples to 16-bit PCM.
// NOTE(review): 'l', 's', 'samples' and 'buf' are declared in the enclosing
// function, which is not shown here.
while (l--) {
// Clamp each sample to [-1, 1] before scaling; negatives scale by 0x8000,
// positives by 0x7FFF, so both -1 and +1 fit the signed 16-bit range.
s = Math.max(-1, Math.min(1, samples[l]));
buf[l] = s < 0 ? s * 0x8000 : s * 0x7FFF;
//buf[l] = buffer[l]*0xFFFF; //old //convert to 16 bit
}
// NOTE(review): this brace closes the enclosing function not shown in this excerpt.
}
Now, the recording sounds perfect, and the MATLAB plots do too.
This is working for me:
const int16Array = Int16Array.from(buffer, x => x * 32767);
Hi
in my toy language, I want to have a single float type "decimal". I am not sure if I should go with f16 or f32 internally.
I assume f32 will take more memory but in today's world is that even relevant.
I also read somewhere that GPUs don't support f32 and I need to have f16 anyway if I want to use any UI libraries.
At this point, I really am not sure what I should go for. I really want to keep a single floating-point type. My language is not targeted at IoT devices, and performance is one of the goals.
Why not go for Float64? This gives you the most accuracy, and in a toy language I doubt it will have any significant performance difference from the other float types.
Also, you shouldn't name your float type "decimal" unless it is an actual decimal float (as opposed to a binary float), since it will just cause confusion.
Calling a binary floating point type "decimal" would certainly be ill advised.