1.1.0 • Published 9 months ago

@kne-components/speech-text v1.1.0

Weekly downloads
-
License
ISC
Repository
github
Last release
9 months ago

speech-text

安装

npm i --save @kne-components/speech-text

示例(全屏)

示例样式

/* Demo-only style: renders the example card centered, 200px wide, with a black border. */
.ant-card {
  border-color: black;
  text-align: center;
  width: 200px;
}

示例代码

const {default: speech} = _SpeechText;
const {Button, Alert, Flex} = antd;
const {useState, useEffect, useRef} = React;

const BaseExample = () => {
    const [message, setMessage] = useState({type: 'info', message: '尚未开始'});
    const [recording, setRecording] = useState(false);
    const recordRef = useRef(null);
    useEffect(() => {
        recordRef.current = speech({url: 'https://ct.deeperagi.com/action/papi/ai/vCMA01/uploadWavFile'});
    }, []);
    return <Flex vertical gap={10}>
        <Alert type={message.type} message={message.message}/>
        <div>
            <Button onClick={() => {
                recordRef.current.then(async ({start, stop}) => {
                    setMessage({type: 'warning', message: '正在识别,请稍等'});
                    if (recording) {
                        const {data} = await stop();
                        if (data.code === 200) {
                            setMessage({type: 'success', message: data.message || '未识别到语音内容'});
                        } else {
                            setMessage({type: 'error', message: '转换错误'});
                        }
                    } else {
                        setMessage({type: 'warning', message: '开始语音识别'});
                        start();
                    }
                    setRecording(!recording);
                });
            }}>{recording ? '正在录制' : '点击开始'}</Button>
        </div>
    </Flex>;
};

render(<BaseExample/>);
const {speechTextRealTime} = _SpeechText;
const {Button, Alert, Flex} = antd;
const {default: axios} = _axios;
const {useState, useEffect, useRef} = React;

const BaseExample = () => {
    const [message, setMessage] = useState({type: 'info', message: '尚未开始'});
    const [recording, setRecording] = useState(false);
    const recordRef = useRef(null);
    useEffect(() => {
        recordRef.current = speechTextRealTime({
            getToken: async () => {
                try {
                    const {data} = await axios({
                        url: 'https://ct.deeperagi.com/action/papi/ai/vCMA02/createToken',
                        method: 'POST',
                        data: JSON.stringify({
                            "avgtype": "11111"
                        }),
                        headers: {
                            'content-type': 'application/json'
                        }
                    });
                    return {
                        token: data.token, appKey: data.appKey
                    };
                } catch (e) {
                    return {
                        "appKey": "TYcsiL5CZb9hd9DR", "token": "e80b7d7f6f054f91a79a14a67cb7f34c"
                    };
                }
            }, onChange: ({message}) => {
                setMessage({type: 'success', message});
            }
        });
    }, []);

    return <Flex vertical gap={10}>
        <Alert type={message.type} message={message.message}/>
        <div>
            <Button onClick={() => {
                recordRef.current.then(async ({start, stop}) => {
                    setMessage({type: 'warning', message: '正在识别,请稍等'});
                    if (recording) {
                        await stop();
                        setMessage({type: 'info', message: '识别结束'});
                    } else {
                        setMessage({type: 'warning', message: '开始语音识别'});
                        start();
                    }
                    setRecording(!recording);
                });
            }}>{recording ? '正在录制' : '点击开始'}</Button>
        </div>
    </Flex>;
};

render(<BaseExample/>);

API

默认导出 speech(options):Promise

上传语音文件识别

example:

const {start, stop} = await speech(options);

options:Object

| 属性名 | 说明 | 类型 | 默认值 |
| --- | --- | --- | --- |
| url | 上传文件语音识别目标接口地址 | string | - |

开始录音 start():Promise

example:

await start();

结束录音 stop():Promise

example:

const response = await stop();
const {code, message} = response.data;
| 属性名 | 说明 | 类型 | 默认值 |
| --- | --- | --- | --- |
| code | 后端接口返回状态值,200为成功 | number | - |
| message | 语音转换结果 | string | - |

speechTextRealTime(options):Promise

实时语音识别

example:

const {start, stop} = await speechTextRealTime(options);

options:Object

| 属性名 | 说明 | 类型 | 默认值 |
| --- | --- | --- | --- |
| getToken | 获取Token方法:getToken():{token,appKey} | function | - |
| onChange | 识别文本内容发生变化时回调函数 | function | ({message}) => {console.log(message);} |
| getGatewayUrl | 获取WebSocket的url地址:getGatewayUrl({token}):url,可以获取到token参数 | function | - |
| onComplete | 录音结束回调方法 | function | - |
| url | 保存录音文件url | string | - |

开始录音 start():Promise

example:

await start({
    getToken: () => {
    },
    onChange: ({message}) => {
    },
    onComplete: ({file, taskId, messageId, message, chunks}) => {
    }
});

结束录音 stop():Promise

example:

await stop();
1.1.0

9 months ago

1.0.0

9 months ago

0.1.2

9 months ago

0.1.2-alpha.1

9 months ago

0.1.2-alpha.0

9 months ago

0.1.1

9 months ago

0.1.0

9 months ago