Here's what seems to work. I load the data in an ajax call with the response type "arraybuffer". Otherwise, the response ends up being a string and it is a mess to work with. Then I convert to a 16-bit array. Then I convert that to a Float32 array in the way that works with the WAV encoding. I also needed to throw away the WAV's header, and some metadata at the end of it.

// These are ready to be copied into an AudioBufferSourceNode's channel data.
var theWavDataInFloat32;

// Converts 32-bit float samples (expected range [-1, 1]) to unsigned 16-bit
// PCM words. The first startIndex input samples are skipped; everything after
// them is converted. Out-of-range inputs are clamped before scaling.
function floatTo16Bit(inputArray, startIndex){
    var output = new Uint16Array(inputArray.length - startIndex);
    // Start at startIndex and write relative to it; the original looped from 0
    // and wrote output[i], which misaligned (and overran) the result whenever
    // startIndex > 0.
    for (var i = startIndex; i < inputArray.length; i++){
        // Clamp to [-1, 1] so the scaled value fits in 16 bits.
        var s = Math.max(-1, Math.min(1, inputArray[i]));
        // Negatives scale by 0x8000 and positives by 0x7FFF, matching the
        // asymmetric signed 16-bit PCM range (-32768..32767). Assignment to a
        // Uint16Array truncates and wraps negatives to their two's-complement
        // bit pattern.
        output[i - startIndex] = s < 0 ? s * 0x8000 : s * 0x7FFF;
    }
    return output;
}

// This is passed in an unsigned 16-bit integer array. It is converted to a 32-bit float array.
// The first startIndex items are skipped, and only 'length' number of items is converted.
// This is passed an unsigned 16-bit integer array (as read from a
// little-endian WAV data chunk) and converts it to a 32-bit float array with
// samples in [-1, 1].
// The first startIndex items are skipped and exactly 'length' items are
// converted, so the result has 'length' entries with no leading zero padding.
// (The original sized the output from inputArray.length, indexed it by the
// absolute loop variable — leaving startIndex zeros at the front — and treated
// 'length' as an absolute end index rather than a count.)
function int16ToFloat32(inputArray, startIndex, length) {
    var output = new Float32Array(length);
    for (var i = 0; i < length; i++) {
        var int = inputArray[startIndex + i];
        // Words >= 0x8000 are two's-complement negatives of the signed 16-bit
        // sample; recover the signed value, then normalize (negative range
        // divides by 0x8000, positive by 0x7FFF — the inverse of floatTo16Bit).
        output[i] = (int >= 0x8000) ? -(0x10000 - int) / 0x8000 : int / 0x7FFF;
    }
    return output;
}

// TEST
// Round-trip check: raw 16-bit words -> Float32 samples -> 16-bit words.
var data = [65424, 18, 0, 32700, 33000, 1000, 50000];
var testDataInt = new Uint16Array(data);
var testDataFloat = int16ToFloat32(testDataInt, 0, testDataInt.length);
var testDataInt2 = floatTo16Bit(testDataFloat, 0);
// testDataInt2 should now closely match data — small rounding differences only.

var xhr = new XMLHttpRequest();
xhr.open('GET', '/my-sound.wav', true);
// Request an ArrayBuffer so the bytes arrive raw; a string response would be
// a mess to work with.
xhr.responseType = 'arraybuffer';

xhr.onload = function(e) {
    if (this.status === 200) {
        // The response is the whole WAV file; only the data portion matters.
        // A canonical WAV carries 44 bytes (22 words) of header up front and
        // may have trailing metadata after the samples.
        // The data chunk's byte length sits at byte offsets 40-43, i.e.
        // little-endian 16-bit words 20 and 21.
        var words = new Uint16Array(this.response);
        // The stored length is in bytes; the array holds 16-bit words, so halve it.
        var length = (words[20] + words[21] * 0x10000) / 2;
        theWavDataInFloat32 = int16ToFloat32(words, 22, length);
    }
};

xhr.send();
Answer from Paulie on Stack Overflow
🌐
Mozilla
blog.mozilla.org › javascript › 2013 › 11 › 07 › efficient-float32-arithmetic-in-javascript
Efficient float32 arithmetic in JavaScript - The Mozilla Blog
November 7, 2013 - Float32 specializations of math functions in several standard C libraries we’ve tested are way faster than their float64 equivalents. In JavaScript, number values are defined to be float64 and all number arithmetic is defined to use float64 arithmetic.
Top answer
1 of 2
12

Here's what seems to work. I load the data in an ajax call with the response type "arraybuffer". Otherwise, the response ends up being a string and it is a mess to work with. Then I convert to a 16-bit array. Then I convert that to a Float32 array in the way that works with the WAV encoding. I also needed to throw away the WAV's header, and some metadata at the end of it.

// These are ready to be copied into an AudioBufferSourceNode's channel data.
var theWavDataInFloat32;

// Converts 32-bit float samples (expected range [-1, 1]) to unsigned 16-bit
// PCM words. The first startIndex input samples are skipped; everything after
// them is converted. Out-of-range inputs are clamped before scaling.
function floatTo16Bit(inputArray, startIndex){
    var output = new Uint16Array(inputArray.length - startIndex);
    // Start at startIndex and write relative to it; the original looped from 0
    // and wrote output[i], which misaligned (and overran) the result whenever
    // startIndex > 0.
    for (var i = startIndex; i < inputArray.length; i++){
        // Clamp to [-1, 1] so the scaled value fits in 16 bits.
        var s = Math.max(-1, Math.min(1, inputArray[i]));
        // Negatives scale by 0x8000 and positives by 0x7FFF, matching the
        // asymmetric signed 16-bit PCM range (-32768..32767). Assignment to a
        // Uint16Array truncates and wraps negatives to their two's-complement
        // bit pattern.
        output[i - startIndex] = s < 0 ? s * 0x8000 : s * 0x7FFF;
    }
    return output;
}

// This is passed in an unsigned 16-bit integer array. It is converted to a 32-bit float array.
// The first startIndex items are skipped, and only 'length' number of items is converted.
// This is passed an unsigned 16-bit integer array (as read from a
// little-endian WAV data chunk) and converts it to a 32-bit float array with
// samples in [-1, 1].
// The first startIndex items are skipped and exactly 'length' items are
// converted, so the result has 'length' entries with no leading zero padding.
// (The original sized the output from inputArray.length, indexed it by the
// absolute loop variable — leaving startIndex zeros at the front — and treated
// 'length' as an absolute end index rather than a count.)
function int16ToFloat32(inputArray, startIndex, length) {
    var output = new Float32Array(length);
    for (var i = 0; i < length; i++) {
        var int = inputArray[startIndex + i];
        // Words >= 0x8000 are two's-complement negatives of the signed 16-bit
        // sample; recover the signed value, then normalize (negative range
        // divides by 0x8000, positive by 0x7FFF — the inverse of floatTo16Bit).
        output[i] = (int >= 0x8000) ? -(0x10000 - int) / 0x8000 : int / 0x7FFF;
    }
    return output;
}

// TEST
// Round-trip check: convert raw 16-bit words to floats, then back to ints.
var data = [ 65424, 18, 0, 32700, 33000, 1000, 50000 ];
var testDataInt = new Uint16Array(data);
var testDataFloat = int16ToFloat32(testDataInt, 0, data.length);
var testDataInt2 = floatTo16Bit(testDataFloat, 0);
// At this point testDataInt2 should be pretty close to the original data array (there is a little rounding.)

var xhr = new XMLHttpRequest();
xhr.open('GET', '/my-sound.wav', true);
// 'arraybuffer' keeps the response as raw bytes rather than a decoded string.
xhr.responseType = 'arraybuffer';

xhr.onload = function(e) {
    if (this.status === 200) {
        // This retrieves the entire wav file. We're only interested in the data portion.
        // At the beginning is 44 bytes (22 words) of header, and at the end is some metadata about the file.
        // The data chunk's byte length is stored at byte offsets 40-43 (little-endian 16-bit words 20-21).
        // NOTE(review): this assumes a canonical 44-byte header; WAV files with
        // extra chunks before 'data' would need real chunk parsing — TODO confirm.
        var data = new Uint16Array(this.response);
        var length = (data[20] + data[21] * 0x10000) / 2; // The length is in bytes, but the array is 16 bits, so divide by 2.
        theWavDataInFloat32 = int16ToFloat32(data, 22, length);
    }
};

xhr.send();
2 of 2
3

Each real number in the input array is clamped to the interval [-1, 1]

  • Math.min(1, x) gives x if x<=1 and 1 otherwise
  • Math.max(-1, y) gives y if y>=-1 and -1 otherwise

Then this real number between -1 and 1 is converted to a signed 16-bit integer.

The value is multiplied by 32767 if it is positive or by 32768 if it is negative, and then only the integer part is retained by a cast. This is equivalent to keeping only 16 significant bits after the decimal point in the binary representation.

16 bits integers are stored in little Endian on two-byte in buffer, one after the other (according to the offset that advances two into two)

For the reverse operation, simply place two consecutive bytes in an int16. To convert it back to a real number, distinguish two cases according to the sign, dividing by 32768 (negative) or 32767 (positive). Note that the round trip involves some loss of precision.

Discussions

javascript - What is the type of the bytes of UInt32, Int32 and Float32? - Stack Overflow
DataView doesn't "convert UInt32, Int32 and Float32 to bytes", it converts JavaScript Numbers to bytes in UInt32, Int32, and Float32 formats. More on stackoverflow.com
🌐 stackoverflow.com
October 3, 2016
floating point - Difference between floats and ints in Javascript? - Stack Overflow
I'm looking through some of the code from the Google Closure Library and I found this line: var isNegative = number More on stackoverflow.com
🌐 stackoverflow.com
javascript - Converting presentation of a buffer from int8 to float32 in JS - Stack Overflow
In a buffer I have a sequence of bytes read from a file. And I need to represent them as a sequence of float32. Size of each file data block is about 15KB so I don't want to copy from this buffer to More on stackoverflow.com
🌐 stackoverflow.com
January 29, 2021
About the binary format of JavaScript TypedArray float32 - Stack Overflow
I need to write a piece of hardware emulator in JavaScript. It has its own floating point format, so I do lots of conversion between JS numerics and that format, which is slow. I have the idea to use More on stackoverflow.com
🌐 stackoverflow.com
Top answer
1 of 4
99

I emailed the developer of glMatrix and my answer below includes his comments (points 2 & 3):

  1. Creating a new object is generally quicker with Array than Float32Array. The gain is significant for small arrays, but is less (environment dependent) with larger arrays.

  2. Accessing data from a TypedArray (eg. Float32Array) is often faster than from a normal array, which means that most array operations (aside from creating a new object) are faster with TypedArrays.

  3. As also stated by @emidander, glMatrix was developed primarily for WebGL, which requires that vectors and matrices be passed as Float32Array. So, for a WebGL application, the potentially costly conversion from Array to Float32Array would need to be included in any performance measurement.

So, not surprisingly, the best choice is application dependent:

  • If arrays are generally small, and/or number of operations on them is low so that the constructor time is a significant proportion of the array's lifespan, use Array.

  • If code readability is as important as performance, then use Array (i.e. use [], instead of a constructor).

  • If arrays are very large and/or are used for many operations, then use a TypedArray.

  • For WebGL applications (or other applications that would otherwise require a type conversion), use Float32Array (or other TypedArray).

2 of 4
8

I would assume that the glMatrix library uses Float32Array because it is primarily used in WebGL-applications, where matrices are represented as Float32Arrays (http://www.khronos.org/registry/webgl/specs/1.0/#5.14.10).

🌐
Mozilla
blog.mozilla.org › javascript › page › 2
JavaScript | All about Mozilla's JavaScript engine | Page 2
November 7, 2013 - Float32 specializations of math functions in several standard C libraries we’ve tested are way faster than their float64 equivalents. In JavaScript, number values are defined to be float64 and all number arithmetic is defined to use float64 arithmetic.
🌐
MDN Web Docs
developer.mozilla.org › en-US › docs › Web › JavaScript › Reference › Global_Objects › Float32Array
Float32Array - JavaScript - MDN Web Docs
July 10, 2025 - The Float32Array typed array represents an array of 32-bit floating point numbers in the platform byte order. If control over byte order is needed, use DataView instead. The contents are initialized to 0 unless initialization data is explicitly provided. Once established, you can reference elements in the array using the object's methods, or using standard array index syntax (that is, using bracket notation).
Top answer
1 of 2
22

(A lot has changed since 2011 when this answer was posted - see updates below)

2019-June Update

BigInt has been out in V8 (Node.js and Chromium-based browsers) since May 2018. It should land in Firefox 68 - see the SpiderMonkey ticket. Also implemented in WebKit.

BigDecimal hasn't been implemented by any engine yet. Look at alternative library.

2015 Update

It's been over 4 years since I wrote this answer and the situation is much more complicated now.

Now we have:

  • typed arrays
  • asm.js
  • emscripten

Soon we'll have:

  • WebAssembly with the spec developed on GitHub

It means that the number of numeric types available in JavaScript will grow from just one:

  • 64-bit floating point (the IEEE 754 double precision floating-point number - see: ECMA-262 Edition 5.1, Section 8.5 and ECMA-262 Edition 6.0, Section 6.1.6)

to at least the following in WebAssembly:

  • 8-bit integer (signed and unsigned)
  • 16-bit integer (signed and unsigned)
  • 32-bit integer (signed and unsigned)
  • 64-bit integer (signed and unsigned)
  • 32-bit floating point
  • 64-bit floating point

(Technically the internal representations of all integer types are unsigned at the lowest level but different operators can treat them as signed or unsigned, like e.g. int32.sdiv vs. int32.udiv etc.)

Those are available in typed arrays:

  • 8-bit two's complement signed integer
  • 8-bit unsigned integer
  • 8-bit unsigned integer (clamped)
  • 16-bit two's complement signed integer
  • 16-bit unsigned integer
  • 32-bit two's complement signed integer
  • 32-bit unsigned integer
  • 32-bit IEEE floating point number
  • 64-bit IEEE floating point number

asm.js defines the following numeric types:

  • int
  • signed
  • unsigned
  • intish
  • fixnum
  • double
  • double?
  • float
  • float?
  • floatish

Original 2011 answer

There is only one number type in JavaScript – the IEEE 754 double precision floating-point number.

See those questions for some consequences of that fact:

  • Avoiding problems with javascript weird decimal calculations
  • Node giving strange output on the sum of particular float digits
  • Javascript infinity object
2 of 2
5

Although there is only one type of number in Javascript many programmers like to show that their code works with floating point numbers as well as integers. The reason for showing the decimal point is for documentation.

var isNegative = number < 0 || number == 0 && 1 / number < 0;

This works exactly the same as in the Closure Library. But some programmers reading the code would think that it only worked with integers.

Addendum:- I've recently come across an article by D. Baranovskiy who makes many criticisms of the Google Closure library and points out that “It’s a JavaScript library written by Java developers who clearly don’t get JavaScript.” He points out more examples of this type confusion, in color.js https://github.com/google/closure-library/blob/master/closure/goog/color/color.js

https://www.sitepoint.com/google-closure-how-not-to-write-javascript/

Find elsewhere
Top answer
1 of 1
2

The Float32Array will internally represent the values bases on the endianess of the host system, typically little-endian.

And yes, the format is IEEE 754 (this has been around since before FPUs came along, and the variations of it deals with more the width, ie. 64-bit, 80-bit and so on). All numbers (Number) in JavaScript is internally represented as 64-bit IEEE 754. For typed arrays both 32-bit and 64-bit IEEE 754 is of course available.

PowerPC and 68k CPUs uses big-endian (the way it ought to be! :) ). The so-called network order is also big-endian, and many platform-independent file formats are stored in big-endian byte order (in particular with audio and graphics). Most mainstream computers uses little-endian CPUs such as the x86. So in cases with a combination of these you very likely have to deal with different byte orders.

To deal with endianess you can instead of using a Float32Array use a DataView.

For example:

var buffer = new ArrayBuffer(10240);    // some raw byte buffer
var view = new DataView(buffer);        // flexible view supporting endianness

Now you can now read and write to any position in the buffer with endianess in mind (DataView also allow reading/writing from/to non-aligned positions, ie. you can write a Float32 value to position 3 if you need to. You cannot do this with Float32/Uint32/Int16 etc.).

The browser will internally convert to the correct order - you just provide the value as-is:

view.setFloat32(pos, 0.5);              // big-endian
view.setFloat32(pos, 0.5, false)        // big-endian
view.setFloat32(pos, 0.5, true);        // little-endian

And likewise when reading:

var n = view.getFloat32(pos);           // big-endian
var n = view.getFloat32(pos, false)     // big-endian
var n = view.getFloat32(pos, true);     // little-endian

Tip: You can use a native Float32Array internally and later read/write to it using endianess. This tend to be speedier but it requires a conversion using DataView at the end if the resulting buffer's endianess is different from the host system:

var f32 = new Float32Array(buffer);     // or use a size
f32[0] = 0.5;

Then to make sure you have big-endian representation:

var view = new DataView(f32.buffer);
var msbVal = view.getFloat32(0);        // returns 32-bit repres. in big-endian

Hope that gave some inputs! Just throw me questions about it if you want me to elaborate on some part.

🌐
GitHub
github.com › stdlib-js › number-float32-base-to-int32
GitHub - stdlib-js/number-float32-base-to-int32: Convert a single-precision floating-point number to a signed 32-bit integer. · GitHub
var randu = require( '@stdlib/random-base-randu' ); var round = require( '@stdlib/math-base-special-round' ); var MAX_INT = require( '@stdlib/constants-uint32-max' ); var float64ToFloat32 = require( '@stdlib/number-float64-base-to-float32' ); var float32ToInt32 = require( '@stdlib/number-float32-base-to-int32' ); var int32; var f32; var i; for ( i = 0; i < 500; i++ ) { // Generate a random single-precision floating-point integer: f32 = float64ToFloat32( round( randu()*MAX_INT ) ); // Convert the single-precision floating-point integer to a signed integer: int32 = float32ToInt32( f32 ); console
Author   stdlib-js
Top answer
1 of 2
3

JavaScript's number type is IEEE-754 double-precision binary floating point; it doesn't have an integer type except temporarily during some math operations or as part of a typed array (Int32Array, for instance, or a Uint32Array if you mean unsigned). So you have two options:

  1. Ensure that the number has a value that fits in a 32-bit int, even though it's still a number (floating point double). One way to do that is to do a bitwise OR operation with the value 0, because the bitwise operations in JavaScript convert their operands to 32-bit integers before doing the operation:

    | 0 does a signed conversion using the specification's ToInt32 operation:

    value = value | 0;
    // Use `value`...
    

    With that, -5 becomes -5. 123456789123 becomes -1097262461 (yes, negative).

    or >>> 0 does an unsigned conversion using the spec's ToUint32:

    value = value >>> 0;
    // Use `value`...
    

    The latter converts to unsigned 32-bit int. -5 becomes 4294967291, 123456789123 becomes 3197704835.

  2. Use an Int32Array or Uint32Array:

    const a = new Int32Array(1); // Or use Uint32Array for unsigned
    a[0] = value;
    // Use `a[0]`...
    

    Int32Array uses ToInt32, Uint32Array uses ToUint32.

    Note that any time you use a[0], it will be converted back to a standard number (floating point double), but if you use the array, depending on what you use it for, it will get used as-is.

Note that there's a method that may seem like it's for doing this, but isn't: Math.fround. That doesn't convert to 32-bit int, it converts to 32-bit float (IEEE-754 single-precision floating point). So it isn't useful for this.

2 of 2
0

Well, the easiest way I found is using bitwise not ~.

This is the description from MDN:

The operands are converted to 32-bit integers and expressed by a series of bits (zeroes and ones).

So you can just type double ~ to convert your numbers. Here's some examples:

~~1 // 1
~~-1 // -1
~~5.05 // 5
~~-5.05 // -5
~~2147483647 // 2147483647
~~2147483648 // -2147483648
~~Math.pow(2, 32) // 0
🌐
2ality
2ality.com › 2012 › 02 › js-integers.html
Integers and shift operators in JavaScript
February 20, 2012 - Int32: 32 bit signed integers in the range [−231, 231−1]. Used for: bitwise not, binary bitwise operators, unsigned shift.
🌐
Sololearn
sololearn.com › en › Discuss › 3057798 › difference-between-float-and-int
Difference between float and int | Sololearn: Learn to code for FREE!
July 9, 2022 - Sololearn is the world's largest community of people learning to code. With over 25 programming courses, choose from thousands of topics to learn how to code, brush up your programming knowledge, upskill your technical ability, or stay informed about the latest trends.
🌐
GitHub
github.com › stdlib-js › array-float32
GitHub - stdlib-js/array-float32: Float32Array. · GitHub
var randu = require( '@stdlib/random-base-randu' ); var ctor = require( '@stdlib/array-float32' ); var arr; var i; arr = new ctor( 10 ); for ( i = 0; i < arr.length; i++ ) { arr[ i ] = randu() * 100.0; } console.log( arr ); ... This package is part of stdlib, a standard library for JavaScript and Node.js, with an emphasis on numerical and scientific computing.
Author   stdlib-js
🌐
GeeksforGeeks
geeksforgeeks.org › javascript-float32array-from-method
JavaScript Float32Array.from() Method | GeeksforGeeks
May 26, 2023 - The Javascript Float32Array array represents an array of 32-bit floating-point numbers in the platform byte order. By default, the contents of Float32Array are initialized to 0. The Float32Array.from() method is used to create a new Float32Array from an array-like or iterable object.
🌐
Quora
quora.com › If-JavaScript-numbers-are-always-64-bit-floating-point-how-does-it-work-with-32-bit-machines
If JavaScript numbers are always 64-bit floating point, how does it work with 32-bit machines? - Quora
Answer (1 of 2): Mainly when talking JavaScript we talk a decent CPU, eg x86. Since early days of 8086 its numeric coprocessor supported floating-point types up to 80 bits. So, float (32 bit) and double (64 bits) were supported directly in hardware. So is today when every x86 CPU has hardware flo...
🌐
W3Schools
w3schools.com › js › js_numbers.asp
JavaScript Numbers
Unlike many other programming languages, JavaScript does not define different types of numbers, like integers, short, long, floating-point etc.
🌐
Reddit
reddit.com › r/programminglanguages › float16 vs float32
r/ProgrammingLanguages on Reddit: Float16 vs Float32
August 6, 2019 -

Hi

in my toy language, I want to have a single float type "decimal". I am not sure if I should go with f16 or f32 internally.

I assume f32 will take more memory but in today's world is that even relevant.

I also read somewhere that GPUs don't support f32 and I need to have f16 anyway if I want to use any UI libraries.

At this point, I really am not sure what i should go for. I really want to keep a single floating type. My language is not targetted at IoT devices and performance is one of the goals.