Additional SSH service changes #2881

Merged 3 commits on Oct 28, 2021
services/ssh/Dockerfile (7 changes: 6 additions & 1 deletion)

@@ -27,7 +27,8 @@ ENV TMPDIR=/tmp \
ENV LAGOON=ssh \
OC_VERSION=v3.11.0 \
OC_HASH=0cbc58b \
OC_SHA256=4b0f07428ba854174c58d2e38287e5402964c9a9355f6c359d1242efd0990da3
OC_SHA256=4b0f07428ba854174c58d2e38287e5402964c9a9355f6c359d1242efd0990da3 \
KUBECTL_VERSION=v1.20.0

COPY services/ssh/libnss-mysql-1.5.tar.gz /tmp/libnss-mysql-1.5.tar.gz

@@ -51,6 +52,10 @@ RUN mkdir -p /openshift-origin-client-tools && \
tar -xzf /tmp/openshift-origin-client-tools.tar -C /tmp/openshift-origin-client-tools --strip-components=1 && \
install /tmp/openshift-origin-client-tools/oc /usr/bin/oc && rm -rf /tmp/openshift-origin-client-tools && rm -rf /tmp/openshift-origin-client-tools.tar

RUN curl -Lo kubectl "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl" && \
chmod +x kubectl && \
mv kubectl /usr/bin/kubectl
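A note on the new kubectl install: the binary is pinned to v1.20.0 but, unlike the oc tarball above (verified against OC_SHA256), it is installed without an integrity check. dl.k8s.io publishes a .sha256 file next to every release binary, so a hardened variant of the RUN body could look like this sketch (not part of this PR):

```bash
curl -Lo kubectl "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl"
curl -Lo kubectl.sha256 "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl.sha256"
# dl.k8s.io ships the bare hash; sha256sum -c wants "<hash>  <filename>"
echo "$(cat kubectl.sha256)  kubectl" | sha256sum -c -
chmod +x kubectl && mv kubectl /usr/bin/kubectl && rm kubectl.sha256
```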

RUN curl -L https://github.com/krallin/tini/releases/download/v0.18.0/tini -o /sbin/tini && chmod a+x /sbin/tini

# Reproduce behavior of Alpine: Run Bash as sh
services/ssh/home/rsh.sh (210 changes: 82 additions & 128 deletions)

@@ -19,8 +19,8 @@ shift 3
# get the value from an envvar override (can be added to the ssh deployment)
# default to false so we don't hold up the ssh for a long time
WAIT_TO_UNIDLE_SERVICES=${WAIT_TO_UNIDLE_SERVICES:-false}
# set a timeout of 600 for waiting for a pod to start (the waits are 1 second interval, so 10 minutes timeout)
SSH_CHECK_TIMEOUT=${SSH_CHECK_TIMEOUT:-600}
# set a timeout of 120 for waiting for a pod to start (the waits are 5 second interval, so 10 minutes timeout)
SSH_CHECK_TIMEOUT=${SSH_CHECK_TIMEOUT:-120}
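The updated comment's arithmetic holds: 120 iterations at a 5-second interval keeps the same 600-second (10-minute) ceiling as before while polling the API a fifth as often. Note that the counter counts iterations, not seconds, which matters when overriding SSH_CHECK_TIMEOUT. A standalone sketch of the polling pattern (pod_is_ready is a hypothetical placeholder):

```bash
SSH_CHECK_TIMEOUT=${SSH_CHECK_TIMEOUT:-120}   # iterations, not seconds
SSH_CHECK_COUNTER=0
until pod_is_ready; do
  if [ "$SSH_CHECK_COUNTER" -lt "$SSH_CHECK_TIMEOUT" ]; then
    SSH_CHECK_COUNTER=$((SSH_CHECK_COUNTER + 1))
    sleep 5                                   # 120 iterations x 5s = 600s
  else
    echo "timed out waiting for pod" >&2
    exit 1
  fi
done
```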

# generate a random uuid for this request to help track in logs
# also the uuid will be given to users in any errors so they can provide it to help with tracking too if required
@@ -69,6 +69,13 @@ fi
ADMIN_BEARER="Authorization: bearer $API_ADMIN_TOKEN"
ADMIN_GRAPHQL="query getEnvironmentByOpenshiftProjectName {
environmentByOpenshiftProjectName(openshiftProjectName: \"$PROJECT\") {
project {
openshift {
consoleUrl
token
name
}
}
openshift {
consoleUrl
token
@@ -80,9 +87,15 @@ ADMIN_GRAPHQL="query getEnvironmentByOpenshiftProjectName {
ADMIN_QUERY=$(echo $ADMIN_GRAPHQL | sed 's/"/\\"/g' | sed 's/\\n/\\\\n/g' | awk -F'\n' '{if(NR == 1) {printf $0} else {printf "\\n"$0}}')
ADMIN_ENVIRONMENT=$(curl -s -XPOST -H 'Content-Type: application/json' -H "$ADMIN_BEARER" "${GRAPHQL_ENDPOINT:-api:3000/graphql}" -d "{\"query\": \"$ADMIN_QUERY\"}")

OPENSHIFT_CONSOLE=$(echo $ADMIN_ENVIRONMENT | jq --raw-output '.data.environmentByOpenshiftProjectName.openshift.consoleUrl')
OPENSHIFT_TOKEN=$(echo $ADMIN_ENVIRONMENT | jq --raw-output '.data.environmentByOpenshiftProjectName.openshift.token')
OPENSHIFT_NAME=$(echo $ADMIN_ENVIRONMENT | jq --raw-output '.data.environmentByOpenshiftProjectName.openshift.name')
CLUSTER_CONSOLE=$(echo $ADMIN_ENVIRONMENT | jq --raw-output '.data.environmentByOpenshiftProjectName.openshift.consoleUrl')
CLUSTER_TOKEN=$(echo $ADMIN_ENVIRONMENT | jq --raw-output '.data.environmentByOpenshiftProjectName.openshift.token')
CLUSTER_NAME=$(echo $ADMIN_ENVIRONMENT | jq --raw-output '.data.environmentByOpenshiftProjectName.openshift.name')
# if no cluster is found at the environment level (introduced in lagoon2.1) then grab what is at the project level
if [[ -z ${CLUSTER_NAME} ]]; then
CLUSTER_CONSOLE=$(echo $ADMIN_ENVIRONMENT | jq --raw-output '.data.environmentByOpenshiftProjectName.project.openshift.consoleUrl')
CLUSTER_TOKEN=$(echo $ADMIN_ENVIRONMENT | jq --raw-output '.data.environmentByOpenshiftProjectName.project.openshift.token')
CLUSTER_NAME=$(echo $ADMIN_ENVIRONMENT | jq --raw-output '.data.environmentByOpenshiftProjectName.project.openshift.name')
fi
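The environment-level openshift object wins; only when it is empty (environments created before the per-environment clusters of Lagoon 2.1) does the script fall back to the project-level one. One caveat: jq --raw-output prints the literal string null for a missing field, so the -z test only fires if the API returns an empty string; comparing against "null" (as the token check below does) or collapsing the fallback into jq's alternative operator // is more robust. A sketch of the latter (not in the PR):

```bash
# "//" treats null/absent as missing; the trailing "// empty" yields "" so -z works
CLUSTER_NAME=$(echo "$ADMIN_ENVIRONMENT" | jq -r '
  .data.environmentByOpenshiftProjectName
  | (.openshift.name // .project.openshift.name) // empty')
```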

##
## Check if we have a service and container given, if yes use them.
@@ -100,53 +113,40 @@ else
SERVICE=cli
fi

echo "${UUID}: Incoming Remote Shell Connection: project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}' container='${CONTAINER}' command='$*'" >> /proc/1/fd/1
echo "${UUID}: Incoming Remote Shell Connection: project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}' container='${CONTAINER}' command='$*'" >> /proc/1/fd/1

# This only happens on local development with minishift.
# Login as developer:developer and get the token
if [[ $OPENSHIFT_TOKEN == "null" ]]; then
KUBECONFIG="/tmp/kube" /usr/bin/oc --insecure-skip-tls-verify login -p developer -u developer ${OPENSHIFT_CONSOLE} > /dev/null
OPENSHIFT_TOKEN=$(KUBECONFIG="/tmp/kube" oc --insecure-skip-tls-verify whoami -t)
if [[ $CLUSTER_TOKEN == "null" ]]; then
KUBECONFIG="/tmp/kube" /usr/bin/oc --insecure-skip-tls-verify login -p developer -u developer ${CLUSTER_CONSOLE} > /dev/null
CLUSTER_TOKEN=$(KUBECONFIG="/tmp/kube" oc --insecure-skip-tls-verify whoami -t)
fi

OC="/usr/bin/oc --insecure-skip-tls-verify -n ${PROJECT} --token=${OPENSHIFT_TOKEN} --server=${OPENSHIFT_CONSOLE} "

# If there is a deploymentconfig for the given service
if [[ $($OC get deploymentconfigs -l service=${SERVICE} 2> /dev/null) ]]; then
DEPLOYMENTCONFIG=$($OC get deploymentconfigs -l service=${SERVICE} -o name)
# If the deploymentconfig is scaled to 0, scale to 1
if [[ $($OC get ${DEPLOYMENTCONFIG} -o go-template --template='{{.status.replicas}}') == "0" ]]; then
echo "${UUID}: Attempting to scale deploymentconfig='${DEPLOYMENTCONFIG}' for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
$OC scale --replicas=1 ${DEPLOYMENTCONFIG} >/dev/null 2>&1
OC="/usr/bin/oc --insecure-skip-tls-verify -n ${PROJECT} --token=${CLUSTER_TOKEN} --server=${CLUSTER_CONSOLE} "
KUBECTL="/usr/bin/kubectl --insecure-skip-tls-verify -n ${PROJECT} --token=${CLUSTER_TOKEN} --server=${CLUSTER_CONSOLE} "

# Wait until the scaling is done
while [[ ! $($OC get ${DEPLOYMENTCONFIG} -o go-template --template='{{.status.readyReplicas}}') == "1" ]]
do
sleep 1
done
fi
echo "${UUID}: Deployment is running deploymentconfig='${DEPLOYMENTCONFIG}' for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
fi
IS_KUBERNETES=false

# If there is a deployment for the given service searching for lagoon.sh labels
if [[ $($OC get deployment -l "lagoon.sh/service=${SERVICE}" 2> /dev/null) ]]; then
if [[ $($KUBECTL get deployment -l "lagoon.sh/service=${SERVICE}" 2> /dev/null) ]]; then
IS_KUBERNETES=true
# get any other deployments that may have been idled by the idler and unidle them if required
# this only needs to be done for kubernetes
# we do this first to give the services a bit of time to unidle before starting the one that was requested
DEPLOYMENTS=$($OC get deployments -l "idling.amazee.io/watch=true" -o name)
DEPLOYMENTS=$($KUBECTL get deployments -l "idling.amazee.io/watch=true" -o name)
if [ ! -z "${DEPLOYMENTS}" ]; then
echo "${UUID}: Environment is idled attempting to scale up for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
echo "${UUID}: Environment is idled attempting to scale up for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
# loop over the deployments and unidle them
for DEP in ${DEPLOYMENTS}
do
# if the deployment is idled, unidle it :)
DEP_JSON=$($OC get ${DEP} -o json)
DEP_JSON=$($KUBECTL get ${DEP} -o json)
if [ $(echo "$DEP_JSON" | jq -r '.status.replicas // 0') == "0" ]; then
REPLICAS=$(echo "$DEP_JSON" | jq -r '.metadata.annotations."idling.amazee.io/unidle-replicas" // 1')
if [ ! -z "$REPLICAS" ]; then
REPLICAS=1
fi
echo "${UUID}: Attempting to scale deployment='${DEP}' for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
echo "${UUID}: Attempting to scale deployment='${DEP}' for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
$OC scale --replicas=${REPLICAS} ${DEP} >/dev/null 2>&1
fi
done
@@ -158,132 +158,72 @@ if [[ $($OC get deployment -l "lagoon.sh/service=${SERVICE}" 2> /dev/null) ]]; then
# WAIT_TO_UNIDLE_SERVICES will default to false so that it just scales the deployments
# and won't wait for them to be ready, but if set to true, it will wait for `readyReplicas` to be 1
if [[ "$WAIT_TO_UNIDLE_SERVICES" =~ [Tt][Rr][Uu][Ee] ]]; then
echo "${UUID}: Environment is idled waiting for scale up for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
echo "${UUID}: Environment is idled waiting for scale up for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
SSH_CHECK_COUNTER=0
until [[ $($OC get ${DEP} -o json | jq -r '.status.readyReplicas // 0') -ne "0" ]]
until [[ $($KUBECTL get ${DEP} -o json | jq -r '.status.readyReplicas // 0') -ne "0" ]]
do
if [ $SSH_CHECK_COUNTER -lt $SSH_CHECK_TIMEOUT ]; then
let SSH_CHECK_COUNTER=SSH_CHECK_COUNTER+1
sleep 1
sleep 5
else
echo "${UUID}: Deployment '${DEP}' took too long to start pods"
exit 1
fi
done
echo "${UUID}: Environment scaled up for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
echo "${UUID}: Environment scaled up for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
fi
done
fi
# then actually unidle the service that was requested and wait for it to be ready if it wasn't already captured above
# doing this means if the service hasn't been idled with the `idling.amazee.io/watch=true` label
# we can still establish a connection
DEPLOYMENT=$($OC get deployment -l "lagoon.sh/service=${SERVICE}" -o name)
DEPLOYMENT=$($KUBECTL get deployment -l "lagoon.sh/service=${SERVICE}" -o name)
# If the deployment is scaled to 0, scale to 1
# .status.replicas doesn't exist on a scaled to 0 deployment in k8s so assume it is 0 if nothing is returned
if [[ $($OC get ${DEPLOYMENT} -o json | jq -r '.status.replicas // 0') == "0" ]]; then
echo "${UUID}: Attempting to scale deployment='${DEPLOYMENT}' for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
if [[ $($KUBECTL get ${DEPLOYMENT} -o json | jq -r '.status.replicas // 0') == "0" ]]; then
echo "${UUID}: Attempting to scale deployment='${DEPLOYMENT}' for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
$OC scale --replicas=1 ${DEPLOYMENT} >/dev/null 2>&1
fi
# Wait until the scaling is done
SSH_CHECK_COUNTER=0
until [[ $($OC get ${DEPLOYMENT} -o json | jq -r '.status.readyReplicas // 0') -ne "0" ]]
until [[ $($KUBECTL get ${DEPLOYMENT} -o json | jq -r '.status.readyReplicas // 0') -ne "0" ]]
do
if [ $SSH_CHECK_COUNTER -lt $SSH_CHECK_TIMEOUT ]; then
let SSH_CHECK_COUNTER=SSH_CHECK_COUNTER+1
sleep 1
sleep 5
else
echo "${UUID}: Pod for ${SERVICE} took too long to start"
exit 1
fi
done
echo "${UUID}: Deployment is running deployment='${DEPLOYMENT}' for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
echo "${UUID}: Deployment is running deployment='${DEPLOYMENT}' for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
fi
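One detail behind the replica checks in this block: a Deployment scaled to zero omits .status.replicas entirely, so a plain field access would yield null; jq's // 0 default normalises that. A quick demonstration with canned input:

```bash
echo '{"status":{}}'             | jq -r '.status.replicas // 0'   # prints 0
echo '{"status":{"replicas":3}}' | jq -r '.status.replicas // 0'   # prints 3
```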

# If there is a deployment for the given service search for lagoon labels
# @DEPRECATED: Remove with Lagoon 2.0.0
if [[ $($OC get deployment -l lagoon/service=${SERVICE} 2> /dev/null) ]]; then
# get any other deployments that may have been idled by the idler and unidle them if required
# this only needs to be done for kubernetes
# we do this first to give the services a bit of time to unidle before starting the one that was requested
DEPLOYMENTS=$($OC get deployments -l "idling.amazee.io/watch=true" -o name)
if [ ! -z "${DEPLOYMENTS}" ]; then
echo "${UUID}: Environment is idled waiting for scale up for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
# loop over the deployments and unidle them
for DEP in ${DEPLOYMENTS}
do
# if the deployment is idled, unidle it :)
DEP_JSON=$($OC get ${DEP} -o json)
if [ $(echo "$DEP_JSON" | jq -r '.status.replicas // 0') == "0" ]; then
REPLICAS=$(echo "$DEP_JSON" | jq -r '.metadata.annotations."idling.amazee.io/unidle-replicas" // 1')
if [ ! -z "$REPLICAS" ]; then
REPLICAS=1
fi
echo "${UUID}: Attempting to scale deployment='${DEP}' for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
$OC scale --replicas=${REPLICAS} ${DEP} >/dev/null 2>&1
fi
done
# then if we have to wait for them to start, do that here
for DEP in ${DEPLOYMENTS}
do
# for unidling an entire environment and waiting for the number of `readyReplicas`
# to be 1 for each deployment, could add considerable delays for the ssh connection to establish.
# WAIT_TO_UNIDLE_SERVICES will default to false so that it just scales the deployments
# and won't wait for them to be ready, but if set to true, it will wait for `readyReplicas` to be 1
if [[ "$WAIT_TO_UNIDLE_SERVICES" =~ [Tt][Rr][Uu][Ee] ]]; then
echo "${UUID}: Environment is idled waiting for scale up for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
SSH_CHECK_COUNTER=0
until [[ $($OC get ${DEP} -o json | jq -r '.status.readyReplicas // 0') -ne "0" ]]
do
if [ $SSH_CHECK_COUNTER -lt $SSH_CHECK_TIMEOUT ]; then
let SSH_CHECK_COUNTER=SSH_CHECK_COUNTER+1
sleep 1
else
echo "${UUID}: Deployment '${DEP}' took too long to start pods"
exit 1
fi
done
echo "${UUID}: Environment scaled up for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
fi
done
fi
# then actually unidle the service that was requested and wait for it to be ready if it wasn't already captured above
# doing this means if the service hasn't been idled with the `idling.amazee.io/watch=true` label
# we can still establish a connection
DEPLOYMENT=$($OC get deployment -l lagoon/service=${SERVICE} -o name)
# If the deployment is scaled to 0, scale to 1
# .status.replicas doesn't exist on a scaled to 0 deployment in k8s so assume it is 0 if nothing is returned
if [[ $($OC get ${DEPLOYMENT} -o json | jq -r '.status.replicas // 0') == "0" ]]; then
echo "${UUID}: Attempting to scale up deployment='${DEPLOYMENT}' for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
$OC scale --replicas=1 ${DEPLOYMENT} >/dev/null 2>&1
fi
# Wait until the scaling is done
SSH_CHECK_COUNTER=0
until [[ $($OC get ${DEPLOYMENT} -o json | jq -r '.status.readyReplicas // 0') -ne "0" ]]
do
if [ $SSH_CHECK_COUNTER -lt $SSH_CHECK_TIMEOUT ]; then
let SSH_CHECK_COUNTER=SSH_CHECK_COUNTER+1
sleep 1
else
echo "${UUID}: Pod for ${SERVICE} took too long to start"
exit 1
# If there is a deploymentconfig for the given service, then it isn't kubernetes, it is openshift.
if [[ "${IS_KUBERNETES}" == "false" ]]; then
if [[ $($OC get deploymentconfigs -l service=${SERVICE} 2> /dev/null) ]]; then
DEPLOYMENTCONFIG=$($OC get deploymentconfigs -l service=${SERVICE} -o name)
# If the deploymentconfig is scaled to 0, scale to 1
if [[ $($OC get ${DEPLOYMENTCONFIG} -o go-template --template='{{.status.replicas}}') == "0" ]]; then
echo "${UUID}: Attempting to scale deploymentconfig='${DEPLOYMENTCONFIG}' for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
$OC scale --replicas=1 ${DEPLOYMENTCONFIG} >/dev/null 2>&1

# Wait until the scaling is done
while [[ ! $($OC get ${DEPLOYMENTCONFIG} -o go-template --template='{{.status.readyReplicas}}') == "1" ]]
do
sleep 1
done
fi
done
echo "${UUID}: Deployment is running deployment='${DEPLOYMENT}' for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
echo "${UUID}: Deployment is running deploymentconfig='${DEPLOYMENTCONFIG}' for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
fi
fi
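With IS_KUBERNETES computed once, reads go through the pinned kubectl on Kubernetes clusters while oc handles OpenShift, although the scale calls in the Kubernetes branch above still go through $OC. A follow-up could route everything through a single client variable; a sketch of that idea (a suggestion, not something in this PR):

```bash
# Hypothetical consolidation: choose one client up front, use it everywhere.
if [[ "${IS_KUBERNETES}" == "true" ]]; then
  CLIENT="$KUBECTL"   # Deployments, lagoon.sh/* labels
else
  CLIENT="$OC"        # DeploymentConfigs, legacy service= labels
fi
$CLIENT scale --replicas=1 "${DEPLOYMENT}" >/dev/null 2>&1
```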


echo "${UUID}: Getting pod name for exec for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
POD=$($OC get pods -l service=${SERVICE} -o json | jq -r '[.items[] | select(.metadata.deletionTimestamp == null) | select(.status.phase == "Running")] | first | .metadata.name // empty')

# Check for newer Helm chart "lagoon.sh" labels
if [[ ! $POD ]]; then
POD=$($OC get pods -l "lagoon.sh/service=${SERVICE}" -o json | jq -r '[.items[] | select(.metadata.deletionTimestamp == null) | select(.status.phase == "Running")] | first | .metadata.name // empty')
fi

# Check for deprecated Helm chart "lagoon" labels
# @DEPRECATED: Remove with Lagoon 2.0.0
if [[ ! $POD ]]; then
POD=$($OC get pods -l lagoon/service=${SERVICE} -o json | jq -r '[.items[] | select(.metadata.deletionTimestamp == null) | select(.status.phase == "Running")] | first | .metadata.name // empty')
echo "${UUID}: Getting pod name for exec for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
if [[ "${IS_KUBERNETES}" == "true" ]]; then
POD=$($KUBECTL get pods -l "lagoon.sh/service=${SERVICE}" -o json | jq -r '[.items[] | select(.metadata.deletionTimestamp == null) | select(.status.phase == "Running")] | first | .metadata.name // empty')
else
POD=$($OC get pods -l service=${SERVICE} -o json | jq -r '[.items[] | select(.metadata.deletionTimestamp == null) | select(.status.phase == "Running")] | first | .metadata.name // empty')
fi
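The pod lookup filter does three jobs at once: it drops pods already marked for deletion (a set deletionTimestamp), keeps only those in phase Running, and returns the first match or nothing. With canned input (illustrative only):

```bash
echo '{"items":[
  {"metadata":{"name":"cli-old","deletionTimestamp":"2021-10-28T00:00:00Z"},
   "status":{"phase":"Running"}},
  {"metadata":{"name":"cli-new"},"status":{"phase":"Running"}}
]}' | jq -r '[.items[]
  | select(.metadata.deletionTimestamp == null)
  | select(.status.phase == "Running")]
  | first | .metadata.name // empty'
# prints "cli-new": the terminating pod is skipped
```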

if [[ ! $POD ]]; then
@@ -293,7 +233,11 @@ fi

# If no container defined, load the name of the first container
if [[ -z ${CONTAINER} ]]; then
CONTAINER=$($OC get pod ${POD} -o json | jq --raw-output '.spec.containers[0].name')
if [[ "${IS_KUBERNETES}" == "true" ]]; then
CONTAINER=$($KUBECTL get pod ${POD} -o json | jq --raw-output '.spec.containers[0].name')
else
CONTAINER=$($OC get pod ${POD} -o json | jq --raw-output '.spec.containers[0].name')
fi
fi

if [ -t 1 ]; then
@@ -302,9 +246,19 @@ else
TTY_PARAMETER=""
fi
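[ -t 1 ] tests whether stdout is attached to a terminal, so interactive sessions get a TTY on the exec while piped or scripted invocations do not. The diff view elides the true branch; it presumably assigns TTY_PARAMETER="-t", but that line is an assumption here, not visible in the hunk. Behaviour sketch:

```bash
if [ -t 1 ]; then
  TTY_PARAMETER="-t"   # assumed: interactive, allocate a TTY on exec
else
  TTY_PARAMETER=""     # piped, e.g. `ssh host cmd | tee log`: no TTY
fi
```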

echo "${UUID}: Exec to pod='${POD}' for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
if [[ -z "$*" ]]; then
exec $OC exec ${POD} -c ${CONTAINER} -i ${TTY_PARAMETER} -- sh
echo "${UUID}: Exec to pod='${POD}' for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1

if [[ "${IS_KUBERNETES}" == "true" ]]; then
if [[ -z "$*" ]]; then
exec $KUBECTL exec ${POD} -c ${CONTAINER} -i ${TTY_PARAMETER} -- sh
else
exec $KUBECTL exec ${POD} -c ${CONTAINER} -i ${TTY_PARAMETER} -- sh -c "$*"
fi
else
exec $OC exec ${POD} -c ${CONTAINER} -i ${TTY_PARAMETER} -- sh -c "$*"
fi
if [[ -z "$*" ]]; then
exec $OC exec ${POD} -c ${CONTAINER} -i ${TTY_PARAMETER} -- sh
else
exec $OC exec ${POD} -c ${CONTAINER} -i ${TTY_PARAMETER} -- sh -c "$*"
fi

fi
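For context beyond the diff: rsh.sh runs as the forced command behind Lagoon's SSH service, and a session like the following exercises both exec paths. The hostname is a placeholder; the service=/container= arguments are the ones the script parses above:

```bash
# Interactive shell in the cli service (stdout is a TTY, "-t" path):
ssh -t -p 32222 myproject-main@ssh.lagoon.example.com service=cli

# One-off command (no TTY, takes the `sh -c "$*"` path):
ssh -p 32222 myproject-main@ssh.lagoon.example.com service=cli container=cli 'drush status'
```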