openstack team mailing list archive
Message #19289
Re: CRITICAL nova [-] [Errno 98] Address already in use
Hi,
Maybe this will shed some light on it?
Thanks,
Andrew
[root@blade02 init.d]# cat /etc/nova/api-paste.ini
############
# Metadata #
############
[composite:metadata]
use = egg:Paste#urlmap
/: meta
[pipeline:meta]
pipeline = ec2faultwrap logrequest metaapp
[app:metaapp]
paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
#######
# EC2 #
#######
[composite:ec2]
use = egg:Paste#urlmap
/services/Cloud: ec2cloud
[composite:ec2cloud]
use = call:nova.api.auth:pipeline_factory
noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor
keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor
[filter:ec2faultwrap]
paste.filter_factory = nova.api.ec2:FaultWrapper.factory
[filter:logrequest]
paste.filter_factory = nova.api.ec2:RequestLogging.factory
[filter:ec2lockout]
paste.filter_factory = nova.api.ec2:Lockout.factory
[filter:ec2keystoneauth]
paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory
[filter:ec2noauth]
paste.filter_factory = nova.api.ec2:NoAuth.factory
[filter:cloudrequest]
controller = nova.api.ec2.cloud.CloudController
paste.filter_factory = nova.api.ec2:Requestify.factory
[filter:authorizer]
paste.filter_factory = nova.api.ec2:Authorizer.factory
[filter:validator]
paste.filter_factory = nova.api.ec2:Validator.factory
[app:ec2executor]
paste.app_factory = nova.api.ec2:Executor.factory
#############
# Openstack #
#############
[composite:osapi_compute]
use = call:nova.api.openstack.urlmap:urlmap_factory
/: oscomputeversions
/v1.1: openstack_compute_api_v2
/v2: openstack_compute_api_v2
[composite:osapi_volume]
use = call:nova.api.openstack.urlmap:urlmap_factory
/: osvolumeversions
/v1: openstack_volume_api_v1
[composite:openstack_compute_api_v2]
use = call:nova.api.auth:pipeline_factory
noauth = faultwrap sizelimit noauth ratelimit osapi_compute_app_v2
keystone = faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v2
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2
[composite:openstack_volume_api_v1]
use = call:nova.api.auth:pipeline_factory
noauth = faultwrap sizelimit noauth ratelimit osapi_volume_app_v1
keystone = faultwrap sizelimit authtoken keystonecontext ratelimit osapi_volume_app_v1
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext osapi_volume_app_v1
[filter:faultwrap]
paste.filter_factory = nova.api.openstack:FaultWrapper.factory
[filter:noauth]
paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
[filter:ratelimit]
paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory
[filter:sizelimit]
paste.filter_factory = nova.api.sizelimit:RequestBodySizeLimiter.factory
[app:osapi_compute_app_v2]
paste.app_factory = nova.api.openstack.compute:APIRouter.factory
[pipeline:oscomputeversions]
pipeline = faultwrap oscomputeversionapp
[app:osapi_volume_app_v1]
paste.app_factory = nova.api.openstack.volume:APIRouter.factory
[app:oscomputeversionapp]
paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
[pipeline:osvolumeversions]
pipeline = faultwrap osvolumeversionapp
[app:osvolumeversionapp]
paste.app_factory = nova.api.openstack.volume.versions:Versions.factory
##########
# Shared #
##########
[filter:keystonecontext]
paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystone.middleware.auth_token:filter_factory
admin_tenant_name = service
admin_user = nova
admin_password = x7deix7dei
auth_uri = http://controller:5000/
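A quick check of which of nova-api's listen ports is already taken, and by what (a sketch; the port numbers are nova's upstream defaults for ec2, osapi_compute, metadata and osapi_volume, which should apply here since the nova.conf quoted below does not override any of the *_listen_port options):
# nova default listen ports: ec2=8773, osapi_compute=8774, metadata=8775, osapi_volume=8776
netstat -tunlp | egrep ':(8773|8774|8775|8776)'
# if lsof is installed, it names the process already bound to a given port, e.g. the metadata port:
lsof -i TCP:8775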
On Dec 10, 2012, at 7:10 PM, Vishvananda Ishaya wrote:
> Odd. This looks remarkably like it is trying to start osapi_volume even though you don't have it in enabled_apis. Your enabled_apis setting looks correct to me.
>
> Vish
>
>
> On Dec 10, 2012, at 9:24 AM, Andrew Holway <a.holway@xxxxxxxxxxxx> wrote:
>
>> Hi,
>>
>> I cannot start the nova-api service.
>>
>> [root@blade02 07-openstack-controller]# nova list
>> ERROR: ConnectionRefused: '[Errno 111] Connection refused'
>>
>> I followed this guide very carefully:
>>
>> https://github.com/beloglazov/openstack-centos-kvm-glusterfs/#07-openstack-controller-controller
>>
>> Here is api.log
>>
>> 2012-12-10 17:51:31 DEBUG nova.wsgi [-] Loading app metadata from /etc/nova/api-paste.ini from (pid=2536) load_app /usr/lib/python2.6/site-packages/nova/wsgi.py:371
>> 2012-12-10 17:51:31 CRITICAL nova [-] [Errno 98] Address already in use
>> 2012-12-10 17:51:31 TRACE nova Traceback (most recent call last):
>> 2012-12-10 17:51:31 TRACE nova File "/usr/bin/nova-api", line 50, in <module>
>> 2012-12-10 17:51:31 TRACE nova server = service.WSGIService(api)
>> 2012-12-10 17:51:31 TRACE nova File "/usr/lib/python2.6/site-packages/nova/service.py", line 584, in __init__
>> 2012-12-10 17:51:31 TRACE nova port=self.port)
>> 2012-12-10 17:51:31 TRACE nova File "/usr/lib/python2.6/site-packages/nova/wsgi.py", line 72, in __init__
>> 2012-12-10 17:51:31 TRACE nova self._socket = eventlet.listen((host, port), backlog=backlog)
>> 2012-12-10 17:51:31 TRACE nova File "/usr/lib/python2.6/site-packages/eventlet/convenience.py", line 38, in listen
>> 2012-12-10 17:51:31 TRACE nova sock.bind(addr)
>> 2012-12-10 17:51:31 TRACE nova File "<string>", line 1, in bind
>> 2012-12-10 17:51:31 TRACE nova error: [Errno 98] Address already in use
>> 2012-12-10 17:51:31 TRACE nova
>> 2012-12-10 17:51:31 INFO nova.service [-] Parent process has died unexpectedly, exiting
>> 2012-12-10 17:51:31 INFO nova.service [-] Parent process has died unexpectedly, exiting
>> 2012-12-10 17:51:31 INFO nova.wsgi [-] Stopping WSGI server.
>> 2012-12-10 17:51:31 INFO nova.wsgi [-] Stopping WSGI server.
>>
>> [root@blade02 07-openstack-controller]# cat /etc/nova/nova.conf
>> [DEFAULT]
>> logdir = /var/log/nova
>> state_path = /var/lib/nova
>> lock_path = /var/lib/nova/tmp
>> volumes_dir = /etc/nova/volumes
>> dhcpbridge = /usr/bin/nova-dhcpbridge
>> dhcpbridge_flagfile = /etc/nova/nova.conf
>> force_dhcp_release = False
>> injected_network_template = /usr/share/nova/interfaces.template
>> libvirt_nonblocking = True
>> libvirt_inject_partition = -1
>> network_manager = nova.network.manager.FlatDHCPManager
>> iscsi_helper = tgtadm
>> sql_connection = mysql://nova:x7deix7dei@controller/nova
>> compute_driver = libvirt.LibvirtDriver
>> firewall_driver = nova.virt.libvirt.firewall.IptablesFirewallDriver
>> rpc_backend = nova.openstack.common.rpc.impl_qpid
>> rootwrap_config = /etc/nova/rootwrap.conf
>> verbose = True
>> auth_strategy = keystone
>> qpid_hostname = controller
>> network_host = compute1
>> fixed_range = 10.0.0.0/24
>> flat_interface = eth1
>> flat_network_bridge = br100
>> public_interface = eth1
>> glance_host = controller
>> vncserver_listen = 0.0.0.0
>> vncserver_proxyclient_address = controller
>> novncproxy_base_url = http://37.123.104.3:6080/vnc_auto.html
>> xvpvncproxy_base_url = http://37.123.104.3:6081/console
>> metadata_host = 10.141.6.2
>> enabled_apis=ec2,osapi_compute,metadata
>>
>> #[keystone_authtoken]
>> admin_tenant_name = %SERVICE_TENANT_NAME%
>> admin_user = %SERVICE_USER%
>> admin_password = %SERVICE_PASSWORD%
>> auth_host = 127.0.0.1
>> auth_port = 35357
>> auth_protocol = http
>> signing_dirname = /tmp/keystone-signing-nova
>>
>> There is no process using port 8774.
>>
>> [root@blade02 07-openstack-controller]# netstat -tunlp | grep 877
>> tcp 0 0 0.0.0.0:8775 0.0.0.0:* LISTEN 2157/python
>>
>> Maybe it is something similar to:
>>
>> https://bugzilla.redhat.com/show_bug.cgi?id=877606#c3
>>
>> Thanks,
>>
>> Andrew
>>
>>
>>
>>
>
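The netstat output above already shows a python process (PID 2157) listening on 8775, which is nova's default metadata port. A quick way to see what that process is, and whether a second nova-api (or a separately started metadata service) grabbed the port first (a sketch; the PID comes from the netstat output quoted above and will differ between runs):
# identify the process that already holds 8775
ps -fp 2157
# look for any other nova services that may already be running
ps -ef | grep '[n]ova'
chkconfig --list | grep -i nova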