Tag Archives: android

[Android] Hiding the Keyboard in a Fragment

setMenuVisibility does the trick. The catch is that the keyboard does not go away as part of an animation; I would prefer it to simply vanish at once.

    // setMenuVisibility(true) is what FragmentPagerAdapter calls on the page that becomes
    // current, so it can serve as a "this Fragment is now the visible page" hook.
    @Override
    public void setMenuVisibility(boolean menuVisible) {
        super.setMenuVisibility(menuVisible);

        if (menuVisible) {
            // Hide Keyboard
            try {
                final InputMethodManager imm = (InputMethodManager) getActivity().getSystemService(Context.INPUT_METHOD_SERVICE);
                imm.hideSoftInputFromWindow(getView().getWindowToken(), 0);
            } catch (NullPointerException e) {
                // Do not hide when getActivity().getSystemService(Context.INPUT_METHOD_SERVICE) is null
            }
        }
    }
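
If the keyboard should also be dismissed from other places (for example from onPause()), the same InputMethodManager call can be pulled into a small helper. This is only a sketch; the hideKeyboard name and the null checks are my own additions, not part of the original snippet.

    // Hypothetical helper: hides the soft keyboard for any view that has a window token.
    // Safe to call even when no keyboard is currently showing.
    private void hideKeyboard(View anyViewInWindow) {
        if (anyViewInWindow == null || getActivity() == null) {
            return;
        }
        InputMethodManager imm =
                (InputMethodManager) getActivity().getSystemService(Context.INPUT_METHOD_SERVICE);
        if (imm != null) {
            imm.hideSoftInputFromWindow(anyViewInWindow.getWindowToken(), 0);
        }
    }

Calling hideKeyboard(getView()) from onPause() covers the case where the user navigates away while the keyboard is still open.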

[Android] Analyzing the Speech RecognitionListener Interface

To use the Android Speech API without the built-in recognition dialog, implementing the RecognitionListener interface is a must.

Each callback does the following (a setup sketch showing how the speech recognizer itself is created follows the listener code).

       speech.setRecognitionListener(new RecognitionListener() {
            private final static String LOG_TAG = "Voice Recognition";

            // Called when the endpointer is ready for the user to start speaking.
            // In other words, fired once the recognizer is ready to accept speech input.
            @Override
            public void onReadyForSpeech(Bundle params) {
                Log.i(LOG_TAG, "onReadyForSpeech");
            }


            // The user has started to speak.
            // Fired when the user starts speaking.
            @Override
            public void onBeginningOfSpeech() {
                Log.i(LOG_TAG, "onBeginningOfSpeech");
                pgBar.setIndeterminate(false);
                pgBar.setMax(10);

            }

            // The sound level in the audio stream has changed.
            // rmsdB is the sound level in decibels.
            // Reports the current input volume; driving a progress bar with this value
            // gives the user a direct visual cue that they are being heard.
            @Override
            public void onRmsChanged(float rmsdB) {
                Log.i(LOG_TAG, "onRmsChanged");
                pgBar.setProgress((int) rmsdB);
            }
            }

            // More sound has been received.
            // To allow giving feedback to the user regarding the captured audio.
            // There is no guarantee that this method will be called.
            // buffer is a single-channel audio stream.
            // When more audio arrives, this lets you give the user extra feedback about the captured speech.
            // There is no guarantee this method will ever be called, though.
            @Override
            public void onBufferReceived(byte[] buffer) {
                Log.i(LOG_TAG, "onBufferReceived: " + buffer);
            }

            // Called after the user stops speaking.
            // Speaking has simply ended; no result is delivered here.
            // Called when the recognizer decides the user has stopped speaking. Judging by the logs,
            // it checks roughly every 0.1 seconds whether speech has stopped.
            @Override
            public void onEndOfSpeech() {
                Log.i(LOG_TAG, "onEndOfSpeech");
                pgBar.setIndeterminate(true);
                speakBtn.setChecked(false);
            }

            // Called when an error occurs. The possible error codes are constants on SpeechRecognizer;
            // see getErrorText() below.
            @Override
            public void onError(int error) {
                String errorMessage = getErrorText(error);
                Log.d(LOG_TAG, "FAILED " + errorMessage);
                text.setText(errorMessage);
            }

            // Final result!
            // Called when the recognition result we have all been waiting for arrives.
            @Override
            public void onResults(Bundle results) {
                Log.i(LOG_TAG, "onResults");
                chnFoodRecognitionAlgorithsm algorithsm = new chnFoodRecognitionAlgorithsm();

                ArrayList<String> matchResult = results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
                String textString = "";
                for (String result : matchResult) {
                    // Recognition algorithm will go here
                    // DB select will go here
                    textString += result + " ";
                }
                text.setText(textString);
                Log.i(LOG_TAG, "결과값");
                Log.i(LOG_TAG, textString);
            }

            // Called when partial recognition results are available.
            // The callback might be called at any time between onBeginningOfSpeech() and onResults(Bundle)
            // when partial results are ready.
            // This method may be called zero, one or multiple times for each call to startListening(Intent),
            // depending on the speech recognition service implementation. To request partial results, use EXTRA_PARTIAL_RESULTS
            // In short: request partial results with EXTRA_PARTIAL_RESULTS and they show up here.
            // It can fire at any point between onBeginningOfSpeech() and onResults(Bundle), whenever partial results are ready.
            @Override
            public void onPartialResults(Bundle partialResults) {
                Log.i(LOG_TAG, "onPartialResults");
            }

            // Reserved for adding future event types.
            @Override
            public void onEvent(int eventType, Bundle params) {
                Log.i(LOG_TAG, "onEvent");
            }


            // Maps the error codes passed to onError(int) to human-readable messages.
            public String getErrorText(int errorCode) {
                String message;
                switch (errorCode) {
                    case SpeechRecognizer.ERROR_AUDIO:
                        message = "Audio recording error";
                        break;
                    case SpeechRecognizer.ERROR_CLIENT:
                        message = "Client side error";
                        break;
                    case SpeechRecognizer.ERROR_INSUFFICIENT_PERMISSIONS:
                        message = "Insufficient permissions";
                        break;
                    case SpeechRecognizer.ERROR_NETWORK:
                        message = "Network error";
                        break;
                    case SpeechRecognizer.ERROR_NETWORK_TIMEOUT:
                        message = "Network timeout";
                        break;
                    case SpeechRecognizer.ERROR_NO_MATCH:
                        message = "No match";
                        break;
                    case SpeechRecognizer.ERROR_RECOGNIZER_BUSY:
                        message = "RecognitionService busy";
                        break;
                    case SpeechRecognizer.ERROR_SERVER:
                        message = "error from server";
                        break;
                    case SpeechRecognizer.ERROR_SPEECH_TIMEOUT:
                        message = "No speech input";
                        break;
                    default:
                        message = "Didn't understand, please try again.";
                        break;
                }
                return message;
            }
        });
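
For context, here is a minimal sketch of how the speech recognizer used above can be created and started. The variable names (speech, recognizerIntent) are assumptions on my part; only the SpeechRecognizer and RecognizerIntent calls come from the framework, and this direct-recognizer path needs the RECORD_AUDIO permission in the manifest.

    // Minimal setup sketch (variable names are assumptions, not from the original post)
    SpeechRecognizer speech = SpeechRecognizer.createSpeechRecognizer(this);
    // speech.setRecognitionListener(new RecognitionListener() { ... });  // the listener shown above

    Intent recognizerIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    recognizerIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
            RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    recognizerIntent.putExtra(RecognizerIntent.EXTRA_PARTIAL_RESULTS, true); // enables onPartialResults()

    speech.startListening(recognizerIntent);   // the callbacks above start firing
    // ...
    speech.stopListening();                    // and speech.destroy() when finished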

[Android] Speech Recognition Sample Project

Layout

<?xml version="1.0" encoding="utf-8"?>
<RelativeLayout xmlns:android="http://schemas.android.com/apk/res/android"
    android:layout_width="match_parent"
    android:layout_height="match_parent" >
    <TextView
        android:id="@+id/txt_output"
        android:layout_width="wrap_content"
        android:layout_height="wrap_content"
        android:textSize="26dp"
        android:textStyle="normal"
        android:text="결과"
        android:layout_centerHorizontal="true"
        android:layout_above="@+id/btn_mic" />
    <LinearLayout
        android:layout_width="wrap_content"
        android:layout_height="wrap_content"
        android:layout_alignParentBottom="true"
        android:layout_centerHorizontal="true"
        android:layout_marginBottom="60dp"
        android:gravity="center"
        android:orientation="vertical"
        android:id="@+id/linearLayout">

        <TextView
            android:layout_width="wrap_content"
            android:layout_height="wrap_content"
            android:layout_marginTop="10dp"
            android:text="Speech to text using Google API"
            android:textSize="15dp"
            android:textStyle="normal" />
    </LinearLayout>

    <Button
        android:layout_width="wrap_content"
        android:layout_height="wrap_content"
        android:text="Speak!!"
        android:id="@+id/btn_mic"
        android:layout_above="@+id/linearLayout"
        android:layout_centerHorizontal="true" />

    <ListView
        android:layout_width="wrap_content"
        android:layout_height="wrap_content"
        android:id="@+id/listView"
        android:layout_centerHorizontal="true"
        android:layout_alignParentTop="true"
        android:layout_above="@+id/txt_output" />
</RelativeLayout>

Java

package sds.com.google_voice_test;

import android.content.ActivityNotFoundException;
import android.content.Intent;
import android.speech.RecognizerIntent;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.view.View;
import android.widget.ArrayAdapter;
import android.widget.Button;
import android.widget.ListView;
import android.widget.TextView;
import android.widget.Toast;
import java.util.ArrayList;
import java.util.Locale;

public class MainActivity extends AppCompatActivity {

    private final int SPEECH_RECOGNITION_CODE = 1;
    private TextView txtOutput;
    private Button btnMicrophone;
    private ListView resultList;
    private ArrayAdapter<String> adapter;
    private ArrayList<String> arrayList;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        txtOutput = (TextView) findViewById(R.id.txt_output);
        btnMicrophone = (Button) findViewById(R.id.btn_mic);
        resultList = (ListView) findViewById(R.id.listView);


        btnMicrophone.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                startSpeechToText();
            }
        });
    }

    private void startSpeechToText() {
        Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
        // EXTRA_LANGUAGE_MODEL is a required extra for ACTION_RECOGNIZE_SPEECH
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
        // Language setting: English (EXTRA_LANGUAGE expects an IETF language tag string, e.g. "en")
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, Locale.ENGLISH.toString());
        intent.putExtra(RecognizerIntent.EXTRA_PROMPT, "오늘 아침은 무엇을 먹었나요?");
        try {
            startActivityForResult(intent, SPEECH_RECOGNITION_CODE);
        } catch (ActivityNotFoundException a) {
            Toast.makeText(getApplicationContext(),  "구글 보이스를 사용할 수 없는 디바이스 입니다.", Toast.LENGTH_SHORT).show();
        }
    }
    /**
     * Callback for speech recognition activity
     * */
    @Override
    protected void onActivityResult(int requestCode, int resultCode, Intent data) {
        super.onActivityResult(requestCode, resultCode, data);
        switch (requestCode) {
            case SPEECH_RECOGNITION_CODE: {


                if (resultCode == RESULT_OK && data != null) {

                    ArrayList<String> result = data.getStringArrayListExtra(RecognizerIntent.EXTRA_RESULTS);
                    Toast.makeText(getApplicationContext(), String.valueOf(result.size())+"개의 결과가 검색되었습니다.", Toast.LENGTH_LONG).show();
                    String text = result.get(0);
                    txtOutput.setText(text);

                    arrayList = new ArrayList<>();
                    for(String s : result){
                        arrayList.add(s);
                    }

                    adapter = new ArrayAdapter<String>(this, R.layout.support_simple_spinner_dropdown_item, arrayList);
                    resultList.setAdapter(adapter);
                    resultList.setChoiceMode(ListView.CHOICE_MODE_SINGLE);

                }
                break;
            }
        }
    }

}
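
One small addition that fits this sample: since every recognition candidate is shown in the ListView, an item click listener can copy the tapped candidate into txtOutput. This is only a sketch of one way to do it, not part of the original project; it belongs at the end of onCreate() and needs an extra import of android.widget.AdapterView.

        // Sketch: let the user pick one of the recognition candidates from the list
        // (not in the original sample; add at the end of onCreate()).
        resultList.setOnItemClickListener(new AdapterView.OnItemClickListener() {
            @Override
            public void onItemClick(AdapterView<?> parent, View view, int position, long id) {
                String selected = (String) parent.getItemAtPosition(position);
                txtOutput.setText(selected);   // show the chosen candidate as the final result
            }
        });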