{
  // Base hyperparams for a simple model.
  model_params: {
    decoder_mlp: {
      hidden_layers: [1024, 512, 256],
      dropout: 0.2,
      activation: "relu",
      norm: null,
    }
  },

  // Sweep helper, like numpy's arange: build a list of floats starting at
  // `start` and advancing by `step` until `stop` is reached (exclusive).
  arange: (start, stop, step) => {
    // Name an internal computation in an anonymous scope.
    // The comparison direction depends on the sign of `step`, so negative
    // steps count downward.
    done: if(step > 0, start >= stop, start <= stop),
    // Recursive call appends one value per step until `done`.
    // NOTE(review): a zero `step` never terminates — confirm callers never
    // pass 0, or guard against it upstream.
    result: if(done, [], [start] + arange(start + step, stop, step)),
  }.result,

  // One copy of model_params per dropout value in the sweep.
  // NOTE(review): repeated 0.01 float steps accumulate rounding error, so the
  // final value may be e.g. 0.39000000000000007 rather than exactly 0.39.
  experiments: map(
    // & is the patch operator, letting us override just the parts we want to
    // change — here only decoder_mlp.dropout; everything else is inherited.
    (_dropout) => model_params + {decoder_mlp: &{dropout: _dropout}},
    arange(0, 0.4, 0.01),
  ),

// We don't need to expose any of the other junk to our program!
// The anonymous scope hides the helpers and only the projection below
// (`.experiments`) is produced as output.
}.experiments