setup.py<\/code> \u3092\u4f7f\u3063\u305f\u30ab\u30b9\u30bf\u30e0\u30a4\u30f3\u30b9\u30c8\u30fc\u30eb\u304c\u5fc5\u8981\u306a\u5834\u5408\u3067\u3059\u3002<\/li>\n<\/ul>\n\n\n\n\u4eee\u60f3\u74b0\u5883\u5185\u3067\u30e9\u30a4\u30d6\u30e9\u30ea\u3084CUDA\u306b\u5bfe\u5fdc\u3057\u305fPyTorch\u3092\u30a4\u30f3\u30b9\u30c8\u30fc\u30eb\u3057\u305f\u3089\u3001\u30e1\u30e2\u5e33\u306a\u3069\u3067\u4ee5\u4e0b\u3092\u8a18\u8ff0\u3057\u307e\u3059\u3002\u3053\u308c\u3092\u300ctest.py\u300d\u3068\u3057\u3066\u3001\u30d5\u30a9\u30eb\u30c0whisper\u306b\u4fdd\u5b58\u3057\u307e\u3059\u3002<\/p>\n\n\n\n
from whisperspeech.pipeline import Pipeline\nimport torchaudio\n\n# Pipeline\u306e\u521d\u671f\u5316\npipe = Pipeline(s2a_ref='collabora\/whisperspeech:s2a-q4-tiny-en+pl.model')\n\n# \u30c6\u30ad\u30b9\u30c8\u3092\u97f3\u58f0\u306b\u5909\u63db\uff08\u3053\u3053\u3067\u6b63\u3057\u3044\u30e1\u30bd\u30c3\u30c9\u3092\u4f7f\u7528\u3059\u308b\uff09\n# \u4f8b\u3048\u3070 `generate` \u30e1\u30bd\u30c3\u30c9\u304c\u3042\u308b\u3068\u4eee\u5b9a\uff08\u5b9f\u969b\u306e\u30e1\u30bd\u30c3\u30c9\u540d\u3092\u78ba\u8a8d\u3057\u3066\u304f\u3060\u3055\u3044\uff09\nresult = pipe.generate(\"\"\"\nThis is the first demo of Whisper Speech, a fully open source text-to-speech model trained by Collabora and Lion on the Juwels supercomputer.\n\"\"\")\n\n# CUDA\u30c6\u30f3\u30bd\u30eb\u3092CPU\u306b\u79fb\u52d5\uff08GPU\u4e0a\u306e\u30c6\u30f3\u30bd\u30eb\u306e\u5834\u5408\uff09\nresult = result.cpu()\n\n# \u97f3\u58f0\u30c7\u30fc\u30bf\u3092WAV\u30d5\u30a1\u30a4\u30eb\u3068\u3057\u3066\u4fdd\u5b58\ntorchaudio.save('output.wav', result, sample_rate=22050)<\/code><\/pre>\n\n\n\n