Scenario Description
I am exploring the topic of network isolation in Podman containers on macOS, using podman 5.2.2. While testing, I ran into ping behavior that I cannot reconcile with my current understanding of how the bridge network should work. Below are the command outputs I used for testing, and my environment info.
# podman network info
root@localhost:~# podman network inspect podman
[
    {
        "name": "podman",
        "id": "2f259bab93aaaaa2542ba43ef33eb990d0999ee1b9924b557b7be53c0b7a1bb9",
        "driver": "bridge",
        "network_interface": "podman0",
        "created": "2024-09-21T23:41:20.673058802+08:00",
        "subnets": [
            {
                "subnet": "10.88.0.0/16",
                "gateway": "10.88.0.1"
            }
        ],
        "ipv6_enabled": false,
        "internal": false,
        "dns_enabled": false,
        "ipam_options": {
            "driver": "host-local"
        },
        "containers": {}
    }
]
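As a side note, "internal" is false here, so netavark NATs this network to the outside (the MASQUERADE rule shows up later in the debug log below). If I wanted strict isolation instead, my understanding is that something like the following would do it; "isolated-net" is just an example name, and this is a sketch rather than something I have verified on this setup.
# create a network with no external routing / masquerading
podman network create --internal isolated-net
# expect "internal": true; traffic should stay on the bridge
podman network inspect isolated-net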
# testing ping to some random ip, from outside the podman machine (failed to reach)
∅ /opt/podman ❯ ping 100.123.123.123
PING 100.123.123.123 (100.123.123.123): 56 data bytes
Request timeout for icmp_seq 0
Request timeout for icmp_seq 1
Request timeout for icmp_seq 2
Request timeout for icmp_seq 3
Request timeout for icmp_seq 4
^C
--- 100.123.123.123 ping statistics ---
6 packets transmitted, 0 packets received, 100.0% packet loss
# testing ping to the same random ip, from inside the podman machine (succeeded!?)
root@localhost:~# ping 100.123.123.123
PING 100.123.123.123 (100.123.123.123) 56(84) bytes of data.
64 bytes from 100.123.123.123: icmp_seq=1 ttl=64 time=1.88 ms
64 bytes from 100.123.123.123: icmp_seq=2 ttl=64 time=0.535 ms
64 bytes from 100.123.123.123: icmp_seq=3 ttl=64 time=0.933 ms
64 bytes from 100.123.123.123: icmp_seq=4 ttl=64 time=1.47 ms
^C
--- 100.123.123.123 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 3031ms
rtt min/avg/max/mdev = 0.535/1.203/1.880/0.511 ms
# testing ping to some unassigned ip inside the subnet, from the podman machine (succeeded!?)
root@localhost:~# ping 10.88.0.3
PING 10.88.0.3 (10.88.0.3) 56(84) bytes of data.
64 bytes from 10.88.0.3: icmp_seq=1 ttl=64 time=1.03 ms
64 bytes from 10.88.0.3: icmp_seq=2 ttl=64 time=1.37 ms
64 bytes from 10.88.0.3: icmp_seq=3 ttl=64 time=1.22 ms
^C
--- 10.88.0.3 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2012ms
rtt min/avg/max/mdev = 1.026/1.205/1.367/0.139 ms
# testing ping to another unassigned ip inside the subnet, from the podman machine (succeeded!?)
root@localhost:~# ping 10.88.0.4
PING 10.88.0.4 (10.88.0.4) 56(84) bytes of data.
64 bytes from 10.88.0.4: icmp_seq=1 ttl=64 time=0.939 ms
64 bytes from 10.88.0.4: icmp_seq=2 ttl=64 time=1.12 ms
64 bytes from 10.88.0.4: icmp_seq=3 ttl=64 time=1.14 ms
^C
--- 10.88.0.4 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2007ms
rtt min/avg/max/mdev = 0.939/1.063/1.136/0.088 ms
# testing ping to the same ip inside the subnet again, from the podman machine
root@localhost:~# ping 10.88.0.4
PING 10.88.0.4 (10.88.0.4) 56(84) bytes of data.
64 bytes from 10.88.0.4: icmp_seq=1 ttl=64 time=0.816 ms
64 bytes from 10.88.0.4: icmp_seq=2 ttl=64 time=1.04 ms
^C
--- 10.88.0.4 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1043ms
rtt min/avg/max/mdev = 0.816/0.928/1.040/0.112 ms
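One extra check I can think of for these surprising successes (a hedged sketch, using plain iproute2): ask the kernel which route each destination would take. With no containers running I would expect no 10.88.0.0/16 route at all, so everything should leave via the default gateway.
# which route would be used for these destinations? (I would expect: via 192.168.127.1 dev enp0s1)
ip route get 10.88.0.4
ip route get 100.123.123.123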
# create a container inside the subnet
root@localhost:~# podman --log-level debug run \
--privileged -d \
--name testsleep alpine sleep 100000
INFO[0000] podman filtering at log level debug
DEBU[0000] Called run.PersistentPreRunE(podman --log-level debug run --privileged -d --name testsleep alpine sleep 100000)
DEBU[0000] Using conmon: "/usr/bin/conmon"
INFO[0000] Using sqlite as database backend
DEBU[0000] Using graph driver overlay
DEBU[0000] Using graph root /var/lib/containers/storage
DEBU[0000] Using run root /run/containers/storage
DEBU[0000] Using static dir /var/lib/containers/storage/libpod
DEBU[0000] Using tmp dir /run/libpod
DEBU[0000] Using volume path /var/lib/containers/storage/volumes
DEBU[0000] Using transient store: false
DEBU[0000] [graphdriver] trying provided driver "overlay"
DEBU[0000] overlay: imagestore=/usr/lib/containers/storage
DEBU[0000] Cached value indicated that overlay is supported
DEBU[0000] Cached value indicated that overlay is supported
DEBU[0000] Cached value indicated that metacopy is being used
DEBU[0000] NewControl(/var/lib/containers/storage/overlay): nextProjectID = 2923995931
DEBU[0000] Cached value indicated that native-diff is not being used
INFO[0000] Not using native diff for overlay, this may cause degraded performance for building images: kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled
DEBU[0000] backingFs=xfs, projectQuotaSupported=true, useNativeDiff=false, usingMetacopy=true
DEBU[0000] Initializing event backend journald
DEBU[0000] Configured OCI runtime runj initialization failed: no valid executable found for OCI runtime runj: invalid argument
DEBU[0000] Configured OCI runtime ocijail initialization failed: no valid executable found for OCI runtime ocijail: invalid argument
DEBU[0000] Configured OCI runtime crun-vm initialization failed: no valid executable found for OCI runtime crun-vm: invalid argument
DEBU[0000] Configured OCI runtime runc initialization failed: no valid executable found for OCI runtime runc: invalid argument
DEBU[0000] Configured OCI runtime youki initialization failed: no valid executable found for OCI runtime youki: invalid argument
DEBU[0000] Configured OCI runtime krun initialization failed: no valid executable found for OCI runtime krun: invalid argument
DEBU[0000] Configured OCI runtime kata initialization failed: no valid executable found for OCI runtime kata: invalid argument
DEBU[0000] Configured OCI runtime runsc initialization failed: no valid executable found for OCI runtime runsc: invalid argument
DEBU[0000] Using OCI runtime "/usr/bin/crun"
INFO[0000] Setting parallel job count to 13
DEBU[0000] Pulling image alpine (policy: missing)
DEBU[0000] Looking up image "alpine" in local containers storage
DEBU[0000] Normalized platform linux/arm64 to {arm64 linux [] }
DEBU[0000] Loading registries configuration "/etc/containers/registries.conf"
DEBU[0000] Loading registries configuration "/etc/containers/registries.conf.d/000-shortnames.conf"
DEBU[0000] Loading registries configuration "/etc/containers/registries.conf.d/999-podman-machine.conf"
DEBU[0000] Trying "docker.io/library/alpine:latest" ...
DEBU[0000] parsed reference into "[overlay@/var/lib/containers/storage+/run/containers/storage:overlay.imagestore=/usr/lib/containers/storage,overlay.mountopt=nodev,metacopy=on]@c157a85ed455142fd79bff5dce951fd5f5b0d0c6e45e6f54cfd0c4e2bdec587b"
DEBU[0000] Found image "alpine" as "docker.io/library/alpine:latest" in local containers storage
DEBU[0000] Found image "alpine" as "docker.io/library/alpine:latest" in local containers storage ([overlay@/var/lib/containers/storage+/run/containers/storage:overlay.imagestore=/usr/lib/containers/storage,overlay.mountopt=nodev,metacopy=on]@c157a85ed455142fd79bff5dce951fd5f5b0d0c6e45e6f54cfd0c4e2bdec587b)
DEBU[0000] exporting opaque data as blob "sha256:c157a85ed455142fd79bff5dce951fd5f5b0d0c6e45e6f54cfd0c4e2bdec587b"
DEBU[0000] Looking up image "docker.io/library/alpine:latest" in local containers storage
DEBU[0000] Normalized platform linux/arm64 to {arm64 linux [] }
DEBU[0000] Trying "docker.io/library/alpine:latest" ...
DEBU[0000] parsed reference into "[overlay@/var/lib/containers/storage+/run/containers/storage:overlay.imagestore=/usr/lib/containers/storage,overlay.mountopt=nodev,metacopy=on]@c157a85ed455142fd79bff5dce951fd5f5b0d0c6e45e6f54cfd0c4e2bdec587b"
DEBU[0000] Found image "docker.io/library/alpine:latest" as "docker.io/library/alpine:latest" in local containers storage
DEBU[0000] Found image "docker.io/library/alpine:latest" as "docker.io/library/alpine:latest" in local containers storage ([overlay@/var/lib/containers/storage+/run/containers/storage:overlay.imagestore=/usr/lib/containers/storage,overlay.mountopt=nodev,metacopy=on]@c157a85ed455142fd79bff5dce951fd5f5b0d0c6e45e6f54cfd0c4e2bdec587b)
DEBU[0000] exporting opaque data as blob "sha256:c157a85ed455142fd79bff5dce951fd5f5b0d0c6e45e6f54cfd0c4e2bdec587b"
DEBU[0000] Looking up image "alpine" in local containers storage
DEBU[0000] Normalized platform linux/arm64 to {arm64 linux [] }
DEBU[0000] Trying "docker.io/library/alpine:latest" ...
DEBU[0000] parsed reference into "[overlay@/var/lib/containers/storage+/run/containers/storage:overlay.imagestore=/usr/lib/containers/storage,overlay.mountopt=nodev,metacopy=on]@c157a85ed455142fd79bff5dce951fd5f5b0d0c6e45e6f54cfd0c4e2bdec587b"
DEBU[0000] Found image "alpine" as "docker.io/library/alpine:latest" in local containers storage
DEBU[0000] Found image "alpine" as "docker.io/library/alpine:latest" in local containers storage ([overlay@/var/lib/containers/storage+/run/containers/storage:overlay.imagestore=/usr/lib/containers/storage,overlay.mountopt=nodev,metacopy=on]@c157a85ed455142fd79bff5dce951fd5f5b0d0c6e45e6f54cfd0c4e2bdec587b)
DEBU[0000] exporting opaque data as blob "sha256:c157a85ed455142fd79bff5dce951fd5f5b0d0c6e45e6f54cfd0c4e2bdec587b"
DEBU[0000] Inspecting image c157a85ed455142fd79bff5dce951fd5f5b0d0c6e45e6f54cfd0c4e2bdec587b
DEBU[0000] exporting opaque data as blob "sha256:c157a85ed455142fd79bff5dce951fd5f5b0d0c6e45e6f54cfd0c4e2bdec587b"
DEBU[0000] exporting opaque data as blob "sha256:c157a85ed455142fd79bff5dce951fd5f5b0d0c6e45e6f54cfd0c4e2bdec587b"
DEBU[0000] Inspecting image c157a85ed455142fd79bff5dce951fd5f5b0d0c6e45e6f54cfd0c4e2bdec587b
DEBU[0000] Inspecting image c157a85ed455142fd79bff5dce951fd5f5b0d0c6e45e6f54cfd0c4e2bdec587b
DEBU[0000] Inspecting image c157a85ed455142fd79bff5dce951fd5f5b0d0c6e45e6f54cfd0c4e2bdec587b
DEBU[0000] using systemd mode: false
DEBU[0000] setting container name testsleep
DEBU[0000] No hostname set; container's hostname will default to runtime default
DEBU[0000] Loading seccomp profile from "/usr/share/containers/seccomp.json"
DEBU[0000] Successfully loaded network podman: &{podman 2f259bab93aaaaa2542ba43ef33eb990d0999ee1b9924b557b7be53c0b7a1bb9 bridge podman0 2024-09-21 23:41:20.673058802 +0800 CST [{{{10.88.0.0 ffff0000}} 10.88.0.1 <nil>}] [] false false false [] map[] map[] map[driver:host-local]}
DEBU[0000] Successfully loaded network testnet: &{testnet 6c8a00b6a713010096200beda3ecfe740958696ae3bb560b2097afdbafae44db bridge podman1 2024-09-22 23:51:16.012326351 +0800 CST [{{{10.89.0.0 ffffff00}} 10.89.0.1 <nil>}] [] false false true [] map[] map[] map[driver:host-local]}
DEBU[0000] Successfully loaded 2 networks
DEBU[0000] Allocated lock 0 for container eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67
DEBU[0000] exporting opaque data as blob "sha256:c157a85ed455142fd79bff5dce951fd5f5b0d0c6e45e6f54cfd0c4e2bdec587b"
DEBU[0000] Cached value indicated that idmapped mounts for overlay are supported
DEBU[0000] SetQuota path=/var/lib/containers/storage/overlay/99caa37232be14e1991601e472663990d5e268852504f3ac65e29423b58f2544, size=0, inodes=0, projectID=2923995931
DEBU[0000] Created container "eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67"
DEBU[0000] Container "eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67" has work directory "/var/lib/containers/storage/overlay-containers/eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67/userdata"
DEBU[0000] Container "eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67" has run directory "/run/containers/storage/overlay-containers/eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67/userdata"
DEBU[0000] overlay: mount_data=lowerdir=/var/lib/containers/storage/overlay/l/UEYXCGS4AFIOQ4J4PEG5H3SN55,upperdir=/var/lib/containers/storage/overlay/99caa37232be14e1991601e472663990d5e268852504f3ac65e29423b58f2544/diff,workdir=/var/lib/containers/storage/overlay/99caa37232be14e1991601e472663990d5e268852504f3ac65e29423b58f2544/work,metacopy=on,context="system_u:object_r:container_file_t:s0:c1022,c1023"
DEBU[0000] Mounted container "eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67" at "/var/lib/containers/storage/overlay/99caa37232be14e1991601e472663990d5e268852504f3ac65e29423b58f2544/merged"
DEBU[0000] Created root filesystem for container eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67 at /var/lib/containers/storage/overlay/99caa37232be14e1991601e472663990d5e268852504f3ac65e29423b58f2544/merged
DEBU[0000] Made network namespace at /run/netns/netns-239887e0-30d5-da41-dc45-2260024c7189 for container eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67
[DEBUG netavark::network::validation] Validating network namespace...
[DEBUG netavark::commands::setup] Setting up...
[INFO netavark::firewall] Using iptables firewall driver
[DEBUG netavark::network::bridge] Setup network podman
[DEBUG netavark::network::bridge] Container interface name: eth0 with IP addresses [10.88.0.2/16]
[DEBUG netavark::network::bridge] Bridge name: podman0 with IP addresses [10.88.0.1/16]
[DEBUG netavark::network::core_utils] Setting sysctl value for net.ipv4.ip_forward to 1
[DEBUG netavark::network::core_utils] Setting sysctl value for /proc/sys/net/ipv4/conf/podman0/rp_filter to 2
[DEBUG netavark::network::core_utils] Setting sysctl value for /proc/sys/net/ipv6/conf/eth0/autoconf to 0
[DEBUG netavark::network::core_utils] Setting sysctl value for /proc/sys/net/ipv4/conf/eth0/arp_notify to 1
[DEBUG netavark::network::core_utils] Setting sysctl value for /proc/sys/net/ipv4/conf/eth0/rp_filter to 2
[INFO netavark::network::netlink] Adding route (dest: 0.0.0.0/0 ,gw: 10.88.0.1, metric 100)
[DEBUG netavark::firewall::varktables::helpers] chain NETAVARK-1D8721804F16F created on table nat
[DEBUG netavark::firewall::varktables::helpers] chain NETAVARK_ISOLATION_2 created on table filter
[DEBUG netavark::firewall::varktables::helpers] chain NETAVARK_ISOLATION_3 created on table filter
[DEBUG netavark::firewall::varktables::helpers] chain NETAVARK_INPUT created on table filter
[DEBUG netavark::firewall::varktables::helpers] chain NETAVARK_FORWARD created on table filter
[DEBUG netavark::firewall::varktables::helpers] rule -d 10.88.0.0/16 -j ACCEPT created on table nat and chain NETAVARK-1D8721804F16F
[DEBUG netavark::firewall::varktables::helpers] rule ! -d 224.0.0.0/4 -j MASQUERADE created on table nat and chain NETAVARK-1D8721804F16F
[DEBUG netavark::firewall::varktables::helpers] rule -s 10.88.0.0/16 -j NETAVARK-1D8721804F16F created on table nat and chain POSTROUTING
[DEBUG netavark::firewall::varktables::helpers] rule -p udp -s 10.88.0.0/16 --dport 53 -j ACCEPT created on table filter and chain NETAVARK_INPUT
[DEBUG netavark::firewall::varktables::helpers] rule -p tcp -s 10.88.0.0/16 --dport 53 -j ACCEPT created on table filter and chain NETAVARK_INPUT
[DEBUG netavark::firewall::varktables::helpers] rule -m conntrack --ctstate INVALID -j DROP created on table filter and chain NETAVARK_FORWARD
[DEBUG netavark::firewall::varktables::helpers] rule -d 10.88.0.0/16 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT created on table filter and chain NETAVARK_FORWARD
[DEBUG netavark::firewall::varktables::helpers] rule -s 10.88.0.0/16 -j ACCEPT created on table filter and chain NETAVARK_FORWARD
[DEBUG netavark::firewall::varktables::helpers] chain NETAVARK-HOSTPORT-SETMARK created on table nat
[DEBUG netavark::firewall::varktables::helpers] chain NETAVARK-HOSTPORT-MASQ created on table nat
[DEBUG netavark::firewall::varktables::helpers] chain NETAVARK-HOSTPORT-DNAT created on table nat
[DEBUG netavark::firewall::varktables::helpers] rule -j MARK --set-xmark 0x2000/0x2000 created on table nat and chain NETAVARK-HOSTPORT-SETMARK
[DEBUG netavark::firewall::varktables::helpers] rule -j MASQUERADE -m comment --comment 'netavark portfw masq mark' -m mark --mark 0x2000/0x2000 created on table nat and chain NETAVARK-HOSTPORT-MASQ
[DEBUG netavark::firewall::varktables::helpers] rule -j NETAVARK-HOSTPORT-DNAT -m addrtype --dst-type LOCAL created on table nat and chain PREROUTING
[DEBUG netavark::firewall::varktables::helpers] rule -j NETAVARK-HOSTPORT-DNAT -m addrtype --dst-type LOCAL created on table nat and chain OUTPUT
[DEBUG netavark::commands::setup] {
    "podman": StatusBlock {
        dns_search_domains: Some(
            [],
        ),
        dns_server_ips: Some(
            [],
        ),
        interfaces: Some(
            {
                "eth0": NetInterface {
                    mac_address: "3e:3c:3f:84:98:bc",
                    subnets: Some(
                        [
                            NetAddress {
                                gateway: Some(
                                    10.88.0.1,
                                ),
                                ipnet: 10.88.0.2/16,
                            },
                        ],
                    ),
                },
            },
        ),
    },
}
[DEBUG netavark::commands::setup] Setup complete
DEBU[0000] /etc/system-fips does not exist on host, not mounting FIPS mode subscription
DEBU[0000] Setting Cgroups for container eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67 to machine.slice:libpod:eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67
DEBU[0000] reading hooks from /usr/share/containers/oci/hooks.d
DEBU[0000] Workdir "/" resolved to host path "/var/lib/containers/storage/overlay/99caa37232be14e1991601e472663990d5e268852504f3ac65e29423b58f2544/merged"
DEBU[0000] Created OCI spec for container eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67 at /var/lib/containers/storage/overlay-containers/eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67/userdata/config.json
DEBU[0000] /usr/bin/conmon messages will be logged to syslog
DEBU[0000] running conmon: /usr/bin/conmon args="[--api-version 1 -c eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67 -u eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67 -r /usr/bin/crun -b /var/lib/containers/storage/overlay-containers/eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67/userdata -p /run/containers/storage/overlay-containers/eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67/userdata/pidfile -n testsleep --exit-dir /run/libpod/exits --persist-dir /run/libpod/persist/eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67 --full-attach -s -l journald --log-level debug --syslog --conmon-pidfile /run/containers/storage/overlay-containers/eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67/userdata/conmon.pid --exit-command /usr/bin/podman --exit-command-arg --root --exit-command-arg /var/lib/containers/storage --exit-command-arg --runroot --exit-command-arg /run/containers/storage --exit-command-arg --log-level --exit-command-arg debug --exit-command-arg --cgroup-manager --exit-command-arg systemd --exit-command-arg --tmpdir --exit-command-arg /run/libpod --exit-command-arg --network-config-dir --exit-command-arg --exit-command-arg --network-backend --exit-command-arg netavark --exit-command-arg --volumepath --exit-command-arg /var/lib/containers/storage/volumes --exit-command-arg --db-backend --exit-command-arg sqlite --exit-command-arg --transient-store=false --exit-command-arg --runtime --exit-command-arg crun --exit-command-arg --storage-driver --exit-command-arg overlay --exit-command-arg --storage-opt --exit-command-arg overlay.imagestore=/usr/lib/containers/storage --exit-command-arg --storage-opt --exit-command-arg overlay.mountopt=nodev,metacopy=on --exit-command-arg --events-backend --exit-command-arg journald --exit-command-arg --syslog --exit-command-arg container --exit-command-arg cleanup --exit-command-arg eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67]"
INFO[0000] Running conmon under slice machine.slice and unitName libpod-conmon-eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67.scope
DEBU[0000] Received: 3501
INFO[0000] Got Conmon PID as 3499
DEBU[0000] Created container eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67 in OCI runtime
DEBU[0000] Adding nameserver(s) from network status of '[]'
DEBU[0000] Adding search domain(s) from network status of '[]'
DEBU[0000] found local resolver, using "/run/systemd/resolve/resolv.conf" to get the nameservers
DEBU[0000] Starting container eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67 with command [sleep 100000]
DEBU[0000] Started container eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67
DEBU[0000] Notify sent successfully
eed5632671160524d9551a845f779aeba81852573ab833bdb28bc353018fff67
DEBU[0000] Called run.PersistentPostRunE(podman --log-level debug run --privileged -d --name testsleep alpine sleep 100000)
DEBU[0000] Shutting down engines
INFO[0000] Received shutdown.Stop(), terminating! PID=3386
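The netavark lines above show the podman0 bridge coming up with 10.88.0.1/16, so the machine should now have a connected route for the whole subnet. A quick hedged check (commands only; I have not pasted the output):
# confirm the bridge exists
ip link show podman0
# I would expect a line like: 10.88.0.0/16 dev podman0 proto kernel scope link src 10.88.0.1
ip route
# the NETAVARK chain from the debug log should be referenced from the nat POSTROUTING chain
iptables -t nat -S POSTROUTING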
# check container ip address
root@localhost:~# podman container inspect 9fb | grep IPAddress
"IPAddress": "10.88.0.3",
"IPAddress": "10.88.0.3",
root@localhost:~# ping 10.88.0.3
PING 10.88.0.3 (10.88.0.3) 56(84) bytes of data.
64 bytes from 10.88.0.3: icmp_seq=1 ttl=64 time=0.399 ms
64 bytes from 10.88.0.3: icmp_seq=2 ttl=64 time=0.243 ms
64 bytes from 10.88.0.3: icmp_seq=3 ttl=64 time=0.193 ms
^C
--- 10.88.0.3 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2062ms
rtt min/avg/max/mdev = 0.193/0.278/0.399/0.087 ms
# testing ping again in the podman machine after the container started running (it is not reachable now ...)
root@localhost:~# ping 10.88.0.4
PING 10.88.0.4 (10.88.0.4) 56(84) bytes of data.
From 10.88.0.4 icmp_seq=1 Destination Host Unreachable
From 10.88.0.4 icmp_seq=5 Destination Host Unreachable
ping: sendmsg: No route to host
From 10.88.0.4 icmp_seq=6 Destination Host Unreachable
From 10.88.0.4 icmp_seq=8 Destination Host Unreachable
From 10.88.0.4 icmp_seq=9 Destination Host Unreachable
From 10.88.0.4 icmp_seq=10 Destination Host Unreachable
^C
--- 10.88.0.4 ping statistics ---
11 packets transmitted, 0 received, +6 errors, 100% packet loss, time 10247ms
pipe 4
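If the connected route is now in place, my understanding is that the ping to the unassigned 10.88.0.4 is resolved on the bridge itself, ARP gets no reply there, and the kernel reports Destination Host Unreachable. A hedged way to observe this:
# the destination should now resolve onto the bridge instead of the default gw
ip route get 10.88.0.4
# while a ping to 10.88.0.4 is running, I would expect an INCOMPLETE or FAILED neighbor entry
ip neigh show dev podman0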
My Environment and Settings
macOS 14.6.1, Apple M1
~ ❯ podman info
host:
  arch: arm64
  buildahVersion: 1.37.2
  cgroupControllers:
  - cpuset
  - cpu
  - io
  - memory
  - pids
  - rdma
  - misc
  cgroupManager: systemd
  cgroupVersion: v2
  conmon:
    package: conmon-2.1.10-1.fc40.aarch64
    path: /usr/bin/conmon
    version: 'conmon version 2.1.10, commit: '
  cpuUtilization:
    idlePercent: 99.97
    systemPercent: 0.02
    userPercent: 0.01
  cpus: 4
  databaseBackend: sqlite
  distribution:
    distribution: fedora
    variant: coreos
    version: "40"
  eventLogger: journald
  freeLocks: 2046
  hostname: localhost.localdomain
  idMappings:
    gidmap: null
    uidmap: null
  kernel: 6.9.12-200.fc40.aarch64
  linkmode: dynamic
  logDriver: journald
  memFree: 1576660992
  memTotal: 2044641280
  networkBackend: netavark
  networkBackendInfo:
    backend: netavark
    dns:
      package: aardvark-dns-1.12.1-1.20240819115418474394.main.6.gc2cd0be.fc40.aarch64
      path: /usr/libexec/podman/aardvark-dns
      version: aardvark-dns 1.13.0-dev
    package: netavark-1.12.1-1.20240819170533312370.main.26.g4358fd3.fc40.aarch64
    path: /usr/libexec/podman/netavark
    version: netavark 1.13.0-dev
  ociRuntime:
    name: crun
    package: crun-1.16-1.20240813143753154884.main.16.g26c7687.fc40.aarch64
    path: /usr/bin/crun
    version: |-
      crun version UNKNOWN
      commit: 158b340ec38e187abee05cbf3f27b40be2b564d0
      rundir: /run/crun
      spec: 1.0.0
      +SYSTEMD +SELINUX +APPARMOR +CAP +SECCOMP +EBPF +CRIU +LIBKRUN +WASM:wasmedge +YAJL
  os: linux
  pasta:
    executable: /usr/bin/pasta
    package: passt-0^20240726.g57a21d2-1.fc40.aarch64
    version: |
      pasta 0^20240726.g57a21d2-1.fc40.aarch64-pasta
      Copyright Red Hat
      GNU General Public License, version 2 or later
      <https://www.gnu.org/licenses/old-licenses/gpl-2.0.html>
      This is free software: you are free to change and redistribute it.
      There is NO WARRANTY, to the extent permitted by law.
  remoteSocket:
    exists: true
    path: /run/podman/podman.sock
  rootlessNetworkCmd: pasta
  security:
    apparmorEnabled: false
    capabilities: CAP_CHOWN,CAP_DAC_OVERRIDE,CAP_FOWNER,CAP_FSETID,CAP_KILL,CAP_NET_BIND_SERVICE,CAP_SETFCAP,CAP_SETGID,CAP_SETPCAP,CAP_SETUID,CAP_SYS_CHROOT
    rootless: false
    seccompEnabled: true
    seccompProfilePath: /usr/share/containers/seccomp.json
    selinuxEnabled: true
  serviceIsRemote: true
  slirp4netns:
    executable: /usr/bin/slirp4netns
    package: slirp4netns-1.2.2-2.fc40.aarch64
    version: |-
      slirp4netns version 1.2.2
      commit: 0ee2d87523e906518d34a6b423271e4826f71faf
      libslirp: 4.7.0
      SLIRP_CONFIG_VERSION_MAX: 4
      libseccomp: 2.5.5
  swapFree: 0
  swapTotal: 0
  uptime: 13h 52m 15.00s (Approximately 0.54 days)
  variant: v8
plugins:
  authorization: null
  log:
  - k8s-file
  - none
  - passthrough
  - journald
  network:
  - bridge
  - macvlan
  - ipvlan
  volume:
  - local
registries:
  search:
  - docker.io
store:
  configFile: /usr/share/containers/storage.conf
  containerStore:
    number: 0
    paused: 0
    running: 0
    stopped: 0
  graphDriverName: overlay
  graphOptions:
    overlay.imagestore: /usr/lib/containers/storage
    overlay.mountopt: nodev,metacopy=on
  graphRoot: /var/lib/containers/storage
  graphRootAllocated: 106769133568
  graphRootUsed: 5540773888
  graphStatus:
    Backing Filesystem: xfs
    Native Overlay Diff: "false"
    Supports d_type: "true"
    Supports shifting: "true"
    Supports volatile: "true"
    Using metacopy: "true"
  imageCopyTmpDir: /var/tmp
  imageStore:
    number: 2
  runRoot: /run/containers/storage
  transientStore: false
  volumePath: /var/lib/containers/storage/volumes
version:
  APIVersion: 5.2.2
  Built: 1724198400
  BuiltTime: Wed Aug 21 08:00:00 2024
  GitCommit: ""
  GoVersion: go1.22.6
  Os: linux
  OsArch: linux/arm64
  Version: 5.2.2
~ ❯ podman machine inspect
[
    {
        "ConfigDir": {
            "Path": "/Users/my_username/.config/containers/podman/machine/applehv"
        },
        "ConnectionInfo": {
            "PodmanSocket": {
                "Path": "/var/folders/q7/0zh2zxxs1sg3w3ssw3fwps0c0000gn/T/podman/podman-machine-default-api.sock"
            },
            "PodmanPipe": null
        },
        "Created": "2024-09-21T23:19:06.808593+08:00",
        "LastUp": "2024-10-10T17:10:53.115578+08:00",
        "Name": "podman-machine-default",
        "Resources": {
            "CPUs": 4,
            "DiskSize": 100,
            "Memory": 2048,
            "USBs": []
        },
        "SSHConfig": {
            "IdentityPath": "/Users/my_username/.local/share/containers/podman/machine/machine",
            "Port": 57731,
            "RemoteUsername": "core"
        },
        "State": "running",
        "UserModeNetworking": true,
        "Rootful": true,
        "Rosetta": true
    }
]
~ ❯ /opt/podman/bin/vfkit -v
vfkit version: 0.5.1
∅ /opt/podman ❯ /opt/podman/bin/gvproxy -version
gvproxy version v0.7.4
# network info inside the podman machine
root@localhost:~# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host noprefixroute
       valid_lft forever preferred_lft forever
2: enp0s1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 5a:94:ef:e4:0c:ee brd ff:ff:ff:ff:ff:ff
    inet 192.168.127.2/24 brd 192.168.127.255 scope global dynamic noprefixroute enp0s1
       valid_lft 3555sec preferred_lft 3555sec
    inet6 fe80::72f1:23d:f1c:949c/64 scope link noprefixroute
       valid_lft forever preferred_lft forever
root@localhost:~# ip route
default via 192.168.127.1 dev enp0s1 proto dhcp src 192.168.127.2 metric 100
192.168.127.0/24 dev enp0s1 proto kernel scope link src 192.168.127.2 metric 100
my_username 98557 0.0 0.1 411950960 5264 s000 S 5:11PM 0:00.26
/opt/podman/bin/vfkit
--cpus 4
--memory 2048
--bootloader efi,variable-store=/Users/my_username/.local/share/containers/podman/machine/applehv/efi-bl-podman-machine-default,create
--device virtio-blk,path=/Users/my_username/.local/share/containers/podman/machine/applehv/podman-machine-default-arm64.raw
--device virtio-rng
--device virtio-vsock,port=1025,socketURL=/var/folders/q7/0zh2zxxs1sg3w3ssw3fwps0c0000gn/T/podman/podman-machine-default.sock,listen
--device virtio-serial,logFilePath=/var/folders/q7/0zh2zxxs1sg3w3ssw3fwps0c0000gn/T/podman/podman-machine-default.log
--device rosetta,mountTag=rosetta,install
--device virtio-net,unixSocketPath=/var/folders/q7/0zh2zxxs1sg3w3ssw3fwps0c0000gn/T/podman/podman-machine-default-gvproxy.sock,mac=5a:94:ef:e4:0c:ee
--device virtio-fs,sharedDir=/Users,mountTag=a2a0ee2c717462feb1de2f5afd59de5fd2d8
--device virtio-fs,sharedDir=/private,mountTag=71708eb255bc230cd7c91dd26f7667a7b938
--device virtio-fs,sharedDir=/var/folders,mountTag=a0bb3a2c8b0b02ba5958b0576f0d6530e104
--restful-uri tcp://localhost:62470
my_username 98556 0.0 0.1 411857392 7600 s000 S 5:10PM 0:02.82
/opt/podman/bin/gvproxy
-mtu 1500
-ssh-port 57731
-listen-vfkit unixgram:///var/folders/q7/0zh2zxxs1sg3w3ssw3fwps0c0000gn/T/podman/podman-machine-default-gvproxy.sock
-forward-sock /var/folders/q7/0zh2zxxs1sg3w3ssw3fwps0c0000gn/T/podman/podman-machine-default-api.sock
-forward-dest /run/podman/podman.sock
-forward-user root
-forward-identity /Users/my_username/.local/share/containers/podman/machine/machine
-pid-file /var/folders/q7/0zh2zxxs1sg3w3ssw3fwps0c0000gn/T/podman/gvproxy.pid
-log-file /var/folders/q7/0zh2zxxs1sg3w3ssw3fwps0c0000gn/T/podman/gvproxy.log
My Questions
(1) Should I be able to reach arbitrary IPs (like 100.123.123.123, or unassigned addresses in the 10.88.0.0/16 subnet) from inside the podman machine at all? The pings succeed, which surprises me.
(2) Why does pinging an unassigned subnet address like 10.88.0.4 stop working once a container is running on the network?
Please help point me to some possible directions for further exploring these confusions. If I have forgotten to provide any information that would be useful for you to understand my question, please let me know. thanks~ :)
https://github.com/containers/gvisor-tap-vsock/blob/main/README.md#limitations
gvproxy, the process that handles the VM networking, does not seem to handle ICMP well. So that appears to be a limitation there, and it would need to be fixed there.
Because there is no bridge interface or routing rule when no containers are running, all the traffic exits via the default gateway, and thus gvproxy. Once the container is running, the 10.88.0.0/16 subnet (or whatever subnets your network uses) has a local route on the bridge, so traffic is no longer sent to the default gateway.
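A possible way to double-check this from inside the podman machine (a hedged sketch; it assumes curl is available there): gvproxy forwards TCP and UDP but apparently answers ICMP itself, so a TCP probe to the same bogus address should reflect real reachability while ping does not.
# ICMP "succeeds" even though nothing is really there
ping -c 2 100.123.123.123
# a TCP probe should time out, suggesting the ICMP replies were spurious
curl -m 3 http://100.123.123.123/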