type: com.google.api.codegen.samplegen.v1p2.SampleConfigProto
schema_version: 1.2.0
samples:
- region_tag: speech_transcribe_recognition_metadata_beta
  title: Adding recognition metadata (Local File) (Beta)
  description: Adds additional details to a short audio file included in this recognition request
  rpc: Recognize
  service: google.cloud.speech.v1p1beta1.Speech
  request:
  - field: audio.content
    value: "resources/commercial_mono.wav"
    input_parameter: local_file_path
    comment: Path to local audio file, e.g. /path/audio.wav
    value_is_file: true
  - field: config.metadata.interaction_type
    value: VOICE_SEARCH
    comment: The use case of the audio, e.g. PHONE_CALL, DISCUSSION, PRESENTATION, et al.
  - field: config.metadata.recording_device_type
    value: SMARTPHONE
    comment: The kind of device used to capture the audio
  - field: config.metadata.recording_device_name
    value: "Pixel 3"
    comment: |
      The device used to make the recording. Arbitrary string, e.g. 'Pixel XL',
      'VoIP', 'Cardioid Microphone', or other value.
  - field: config.language_code
    value: "en-US"
    comment: |
      The language of the supplied audio. Even though additional languages are
      provided by alternative_language_codes, a primary language is still required.
  response:
  - loop:
      variable: result
      collection: $resp.results
      body:
      - comment:
        - First alternative is the most probable result
      - define: alternative = result.alternatives[0]
      - print:
        - "Transcript: %s"
        - alternative.transcript
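
# For orientation only: a rough, hand-written Python sketch of the kind of sample
# this config describes, assuming the pre-2.0 google-cloud-speech client
# (speech_v1p1beta1). This is not generator output; names such as
# local_file_path mirror the input_parameter above.
#
#   from google.cloud import speech_v1p1beta1
#   from google.cloud.speech_v1p1beta1 import enums
#
#   client = speech_v1p1beta1.SpeechClient()
#
#   local_file_path = "resources/commercial_mono.wav"
#   with open(local_file_path, "rb") as f:
#       content = f.read()
#
#   audio = {"content": content}
#   config = {
#       "metadata": {
#           "interaction_type": enums.RecognitionMetadata.InteractionType.VOICE_SEARCH,
#           "recording_device_type": enums.RecognitionMetadata.RecordingDeviceType.SMARTPHONE,
#           "recording_device_name": "Pixel 3",
#       },
#       "language_code": "en-US",
#   }
#
#   response = client.recognize(config, audio)
#   for result in response.results:
#       # First alternative is the most probable result
#       alternative = result.alternatives[0]
#       print(u"Transcript: {}".format(alternative.transcript))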