ios - Record the user's voice every 2 minutes and clear the text file

Tags: ios swift text-files speech-recognition

I'm new to Swift. I'm working on a project that records the user's voice and converts the sound file to a text file every two minutes. I use a timer to repeat the step every 2 minutes.

The problem is that on the second call the recorder is disabled. Also, the text file is not cleared to be ready for the next call.

Here is the complete code.

import UIKit
import Speech
import AVFoundation



class ViewController: UIViewController {
var audioRecorder:AVAudioRecorder!
var inString = ""
let fileName = "Test"
var str = ""
let recordSettings = [AVSampleRateKey : NSNumber(value: Float(44100.0)),
                      AVFormatIDKey : NSNumber(value: Int32(kAudioFormatMPEG4AAC)),
                      AVNumberOfChannelsKey : NSNumber(value: Int32(1)),
                      AVEncoderAudioQualityKey : NSNumber(value: Int32(AVAudioQuality.high.rawValue))]
var  timer = Timer()

override func viewDidLoad() {
    super.viewDidLoad()
    // Do any additional setup after loading the view, typically from a nib.
    var audioSession = AVAudioSession.sharedInstance()
    do {
        try audioSession.setCategory(AVAudioSessionCategoryPlayAndRecord)
        try audioRecorder = AVAudioRecorder(url: directoryURL()!, settings: recordSettings)
        audioRecorder.prepareToRecord()
    } catch {

        print("error")
    }
    audioSession = AVAudioSession.sharedInstance()
    do {
        try audioSession.setCategory(AVAudioSessionCategoryPlayAndRecord)
        requestSpeechAuth()
    } catch {}
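    // Every 120 seconds, stop the current recording and transcribe it in stopAudio().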
    timer = Timer.scheduledTimer(timeInterval: 120, target: self, selector: #selector (ViewController.stopAudio), userInfo: nil, repeats: true)
}

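// Timer callback: stop the recorder, deactivate the session, and transcribe the finished file into Test.txt.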
@objc func stopAudio() {
    audioRecorder.stop()
    let audioSession = AVAudioSession.sharedInstance()
    do {
        try audioSession.setActive(false)

        let recognizer = SFSpeechRecognizer(locale: Locale(identifier: "ar_SA"))
        let request = SFSpeechURLRecognitionRequest(url: audioRecorder.url)
        recognizer?.recognitionTask(with: request) { (result, error) in
            if let error = error {
                print("There was an error: \(error)")
            } else {
                let dir = try? FileManager.default.url(for: .documentDirectory,
                                                       in: .userDomainMask, appropriateFor: nil, create: true)
                if let fileURL = dir?.appendingPathComponent(self.fileName).appendingPathExtension("txt") {
                    do {
                        self.str=""
                        self.str = (result?.bestTranscription.formattedString)!
                        try self.str.write(to: fileURL, atomically: true, encoding: .utf8)
                    } catch {
                        print("Failed writing to URL: \(fileURL), Error: " + error.localizedDescription)
                    }
                    do {
                        self.inString = try String(contentsOf: fileURL)
                    } catch {
                        print("Failed reading from URL: \(fileURL), Error: " + error.localizedDescription)
                    }
                    self.getIqama(fileN: self.inString,status: self.str)
                }
            } // end else

        } // end recognition result handler

    } catch {} // end do for setActive(false)
    // requestSpeechAuth()
}

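// URL of the audio file (AqimAlsalat.m4a) in the app's Documents directory.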
func directoryURL() -> URL? {
    let fileManager = FileManager.default
    let urls = fileManager.urls(for: .documentDirectory, in: .userDomainMask)
    let documentDirectory = urls[0] as URL
    let soundURL = documentDirectory.appendingPathComponent("AqimAlsalat.m4a")
    return soundURL
}

// Compare the text read back from the file with the expected phrase.
func getIqama(fileN: String, status: String) {
    let st = "السلام عليكم ورحمة الله السلام عليكم ورحمة الله"
    let st1 = fileN
    print(st1)
    if st1 == st {
        // audioEngine.stop()
        // speechRecognitionRequest?.endAudio()
        print(st1)
        print("JJalal")
    } else {
        print("Dalal")
        print(fileN)
    }
}

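// Ask for speech-recognition permission; once authorized, activate the audio session and start recording.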
func requestSpeechAuth(){
    SFSpeechRecognizer.requestAuthorization { authStatus in
        if authStatus == SFSpeechRecognizerAuthorizationStatus.authorized {
            let audioSession = AVAudioSession.sharedInstance()
            do {
                try audioSession.setActive(true)
                self.audioRecorder.record()
            } catch {}
        }
    }
}



override func didReceiveMemoryWarning() {
    super.didReceiveMemoryWarning()
    // Dispose of any resources that can be recreated.
}

}

Any suggestions or ideas?

Thank you.

Best Answer

It looks like you need to call self.audioRecorder.record() again after you stop the recording to convert the sound file to text. The Apple docs say that calling record() will create or erase the audio file, so that should solve your problem.
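For illustration, a minimal sketch of how stopAudio() in the ViewController above could look with that change. It reuses the question's audioRecorder property and locale, leaves the audio session active so the restarted recorder can actually record, and elides the file-writing part:

@objc func stopAudio() {
    audioRecorder.stop()

    // Kick off transcription of the file that was just closed.
    let recognizer = SFSpeechRecognizer(locale: Locale(identifier: "ar_SA"))
    let request = SFSpeechURLRecognitionRequest(url: audioRecorder.url)
    recognizer?.recognitionTask(with: request) { result, _ in
        // ... write result?.bestTranscription.formattedString to the text file ...
    }

    // Restart recording right away so the next 2-minute window is captured.
    // record() creates (or erases) the file at the recorder's URL, so the
    // previous audio does not pile up in the same file.
    audioRecorder.record()
}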

However, you may run into another problem: missing a chunk of the recording while the text is being transcribed. You might consider solving that by alternating between two recorders, or you could try changing the recorder's file location (or moving the previous file) before you start recording again.
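The two-recorder idea could look roughly like the sketch below. AlternatingRecorder, rotate(), and the injected recorders array are hypothetical names used only for illustration, not APIs from the question or from AVFoundation; the caller would prepare two AVAudioRecorder instances on different file URLs and drive rotate() from the 2-minute timer instead of stopAudio():

import AVFoundation
import Speech

final class AlternatingRecorder {
    // Two recorders prepared on different file URLs; while one file is being
    // transcribed, the other recorder is already capturing the next window.
    private let recorders: [AVAudioRecorder]
    private var activeIndex = 0

    init(recorders: [AVAudioRecorder]) {
        self.recorders = recorders
    }

    func start() {
        recorders[activeIndex].record()
    }

    // Called from the repeating timer.
    func rotate() {
        let finished = recorders[activeIndex]
        finished.stop()

        // Start the other recorder immediately so no audio is missed.
        activeIndex = (activeIndex + 1) % recorders.count
        recorders[activeIndex].record()

        // Transcribe the file the finished recorder just closed.
        let request = SFSpeechURLRecognitionRequest(url: finished.url)
        SFSpeechRecognizer(locale: Locale(identifier: "ar_SA"))?
            .recognitionTask(with: request) { result, _ in
                // ... write result?.bestTranscription.formattedString to the
                //     text file, as in the question ...
            }
    }
}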

A similar question about "ios - Record the user's voice every 2 minutes and clear the text file" can be found on Stack Overflow: https://stackoverflow.com/questions/49379470/
